ngram
listlengths
0
67.8k
[ "# -*- coding: utf-8 -*- __author = 'Musker.Chao' __version = '0.2.2' from .jdb", "coding: utf-8 -*- __author = 'Musker.Chao' __version = '0.2.2' from .jdb import NoSql", "-*- coding: utf-8 -*- __author = 'Musker.Chao' __version = '0.2.2' from .jdb import" ]
[ "[ ('core', '0006_suggestions_from_django_doctor'), ] operations = [ migrations.AlterField( model_name='datapackage', name='name', field=models.CharField(blank=True, default='', max_length=500),", "Django 3.1.4 on 2020-12-01 15:25 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "# Generated by Django 3.1.4 on 2020-12-01 15:25 from django.db import migrations, models", "Migration(migrations.Migration): dependencies = [ ('core', '0006_suggestions_from_django_doctor'), ] operations = [ migrations.AlterField( model_name='datapackage', name='name',", "'0006_suggestions_from_django_doctor'), ] operations = [ migrations.AlterField( model_name='datapackage', name='name', field=models.CharField(blank=True, default='', max_length=500), ), ]", "2020-12-01 15:25 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core',", "by Django 3.1.4 on 2020-12-01 15:25 from django.db import migrations, models class Migration(migrations.Migration):", "('core', '0006_suggestions_from_django_doctor'), ] operations = [ migrations.AlterField( model_name='datapackage', name='name', field=models.CharField(blank=True, default='', max_length=500), ),", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0006_suggestions_from_django_doctor'), ] operations =", "dependencies = [ ('core', '0006_suggestions_from_django_doctor'), ] operations = [ migrations.AlterField( model_name='datapackage', name='name', field=models.CharField(blank=True,", "migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0006_suggestions_from_django_doctor'), ] operations = [", "on 2020-12-01 15:25 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "class Migration(migrations.Migration): dependencies = [ ('core', '0006_suggestions_from_django_doctor'), ] operations = [ migrations.AlterField( 
model_name='datapackage',", "= [ ('core', '0006_suggestions_from_django_doctor'), ] operations = [ migrations.AlterField( model_name='datapackage', name='name', field=models.CharField(blank=True, default='',", "15:25 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0006_suggestions_from_django_doctor'),", "3.1.4 on 2020-12-01 15:25 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "Generated by Django 3.1.4 on 2020-12-01 15:25 from django.db import migrations, models class", "models class Migration(migrations.Migration): dependencies = [ ('core', '0006_suggestions_from_django_doctor'), ] operations = [ migrations.AlterField(", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0006_suggestions_from_django_doctor'), ] operations", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0006_suggestions_from_django_doctor'), ]" ]
[ "message.channel.name ): await message.delete() lc: TextChannel = self.bot.get_channel(LOGS) # type: ignore if lc:", "payload[\"message_id\"] channel_id = payload[\"channel_id\"] await self.bot.http.request( Route( \"DELETE\", \"/channels/{channel_id}/messages/{message_id}\", channel_id=channel_id, message_id=message_id, ) )", "automatically deleted in {message.channel.mention}:\\n\\n{message.content[:1800]}\", allowed_mentions=AllowedMentions(users=False, roles=False, everyone=False), ) @Cog.listener() async def on_raw_message_edit(self, payload:", "roles=False, everyone=False), ) @Cog.listener() async def on_raw_message_edit(self, payload: dict) -> None: if payload[\"channel_id\"]", "import AllowedMentions, Message, TextChannel from disnake.ext.commands import Cog from disnake.http import Route from", "return message_id = payload[\"message_id\"] channel_id = payload[\"channel_id\"] await self.bot.http.request( Route( \"DELETE\", \"/channels/{channel_id}/messages/{message_id}\", channel_id=channel_id,", "import Cog from disnake.http import Route from src.impl.bot import Bot CHANNELS = [int(c)", "disnake import AllowedMentions, Message, TextChannel from disnake.ext.commands import Cog from disnake.http import Route", "message: Message) -> None: if message.channel.id not in CHANNELS: return if not isinstance(message.channel,", "message.attachments or message.embeds or message.components or message.content != message.channel.name ): await message.delete() lc:", "def on_raw_message_edit(self, payload: dict) -> None: if payload[\"channel_id\"] not in CHANNELS: return message_id", "environ[\"CHANNELS\"].split(\";\")] LOGS = int(environ[\"LOGS\"]) class AutoMod(Cog): def __init__(self, bot: Bot) -> None: self.bot", "payload[\"channel_id\"] await self.bot.http.request( Route( \"DELETE\", \"/channels/{channel_id}/messages/{message_id}\", channel_id=channel_id, message_id=message_id, ) ) def setup(bot: Bot)", "from disnake.ext.commands import Cog from disnake.http import 
Route from src.impl.bot import Bot CHANNELS", "dict) -> None: if payload[\"channel_id\"] not in CHANNELS: return message_id = payload[\"message_id\"] channel_id", "or message.attachments or message.embeds or message.components or message.content != message.channel.name ): await message.delete()", "from src.impl.bot import Bot CHANNELS = [int(c) for c in environ[\"CHANNELS\"].split(\";\")] LOGS =", "{message.author.mention} has been automatically deleted in {message.channel.mention}:\\n\\n{message.content[:1800]}\", allowed_mentions=AllowedMentions(users=False, roles=False, everyone=False), ) @Cog.listener() async", "@Cog.listener() async def on_message(self, message: Message) -> None: if message.channel.id not in CHANNELS:", "payload[\"channel_id\"] not in CHANNELS: return message_id = payload[\"message_id\"] channel_id = payload[\"channel_id\"] await self.bot.http.request(", "Bot) -> None: self.bot = bot @Cog.listener() async def on_message(self, message: Message) ->", "payload: dict) -> None: if payload[\"channel_id\"] not in CHANNELS: return message_id = payload[\"message_id\"]", "!= message.channel.name ): await message.delete() lc: TextChannel = self.bot.get_channel(LOGS) # type: ignore if", "if ( message.stickers or message.attachments or message.embeds or message.components or message.content != message.channel.name", "not in CHANNELS: return message_id = payload[\"message_id\"] channel_id = payload[\"channel_id\"] await self.bot.http.request( Route(", "message.delete() lc: TextChannel = self.bot.get_channel(LOGS) # type: ignore if lc: await lc.send( f\"Message", "<gh_stars>1-10 from os import environ from disnake import AllowedMentions, Message, TextChannel from disnake.ext.commands", "CHANNELS: return message_id = payload[\"message_id\"] channel_id = payload[\"channel_id\"] await self.bot.http.request( Route( \"DELETE\", \"/channels/{channel_id}/messages/{message_id}\",", "message.content != message.channel.name ): await message.delete() lc: TextChannel = 
self.bot.get_channel(LOGS) # type: ignore", "message.channel.id not in CHANNELS: return if not isinstance(message.channel, TextChannel): return if ( message.stickers", "in {message.channel.mention}:\\n\\n{message.content[:1800]}\", allowed_mentions=AllowedMentions(users=False, roles=False, everyone=False), ) @Cog.listener() async def on_raw_message_edit(self, payload: dict) ->", "self.bot = bot @Cog.listener() async def on_message(self, message: Message) -> None: if message.channel.id", "TextChannel): return if ( message.stickers or message.attachments or message.embeds or message.components or message.content", "been automatically deleted in {message.channel.mention}:\\n\\n{message.content[:1800]}\", allowed_mentions=AllowedMentions(users=False, roles=False, everyone=False), ) @Cog.listener() async def on_raw_message_edit(self,", "# type: ignore if lc: await lc.send( f\"Message from {message.author.mention} has been automatically", "f\"Message from {message.author.mention} has been automatically deleted in {message.channel.mention}:\\n\\n{message.content[:1800]}\", allowed_mentions=AllowedMentions(users=False, roles=False, everyone=False), )", "return if ( message.stickers or message.attachments or message.embeds or message.components or message.content !=", "def __init__(self, bot: Bot) -> None: self.bot = bot @Cog.listener() async def on_message(self,", "TextChannel from disnake.ext.commands import Cog from disnake.http import Route from src.impl.bot import Bot", "async def on_raw_message_edit(self, payload: dict) -> None: if payload[\"channel_id\"] not in CHANNELS: return", "await self.bot.http.request( Route( \"DELETE\", \"/channels/{channel_id}/messages/{message_id}\", channel_id=channel_id, message_id=message_id, ) ) def setup(bot: Bot) ->", "from disnake import AllowedMentions, Message, TextChannel from disnake.ext.commands import Cog from disnake.http import", "environ from disnake import AllowedMentions, Message, TextChannel from disnake.ext.commands 
import Cog from disnake.http", "message.embeds or message.components or message.content != message.channel.name ): await message.delete() lc: TextChannel =", "lc: TextChannel = self.bot.get_channel(LOGS) # type: ignore if lc: await lc.send( f\"Message from", "src.impl.bot import Bot CHANNELS = [int(c) for c in environ[\"CHANNELS\"].split(\";\")] LOGS = int(environ[\"LOGS\"])", "= payload[\"channel_id\"] await self.bot.http.request( Route( \"DELETE\", \"/channels/{channel_id}/messages/{message_id}\", channel_id=channel_id, message_id=message_id, ) ) def setup(bot:", "import environ from disnake import AllowedMentions, Message, TextChannel from disnake.ext.commands import Cog from", "c in environ[\"CHANNELS\"].split(\";\")] LOGS = int(environ[\"LOGS\"]) class AutoMod(Cog): def __init__(self, bot: Bot) ->", ") @Cog.listener() async def on_raw_message_edit(self, payload: dict) -> None: if payload[\"channel_id\"] not in", "= [int(c) for c in environ[\"CHANNELS\"].split(\";\")] LOGS = int(environ[\"LOGS\"]) class AutoMod(Cog): def __init__(self,", "from {message.author.mention} has been automatically deleted in {message.channel.mention}:\\n\\n{message.content[:1800]}\", allowed_mentions=AllowedMentions(users=False, roles=False, everyone=False), ) @Cog.listener()", "has been automatically deleted in {message.channel.mention}:\\n\\n{message.content[:1800]}\", allowed_mentions=AllowedMentions(users=False, roles=False, everyone=False), ) @Cog.listener() async def", "class AutoMod(Cog): def __init__(self, bot: Bot) -> None: self.bot = bot @Cog.listener() async", "): await message.delete() lc: TextChannel = self.bot.get_channel(LOGS) # type: ignore if lc: await", "= self.bot.get_channel(LOGS) # type: ignore if lc: await lc.send( f\"Message from {message.author.mention} has", "Bot CHANNELS = [int(c) for c in environ[\"CHANNELS\"].split(\";\")] LOGS = int(environ[\"LOGS\"]) class AutoMod(Cog):", "os import environ from disnake import AllowedMentions, Message, TextChannel from 
disnake.ext.commands import Cog", "Message, TextChannel from disnake.ext.commands import Cog from disnake.http import Route from src.impl.bot import", "for c in environ[\"CHANNELS\"].split(\";\")] LOGS = int(environ[\"LOGS\"]) class AutoMod(Cog): def __init__(self, bot: Bot)", "on_message(self, message: Message) -> None: if message.channel.id not in CHANNELS: return if not", "in CHANNELS: return message_id = payload[\"message_id\"] channel_id = payload[\"channel_id\"] await self.bot.http.request( Route( \"DELETE\",", "None: if message.channel.id not in CHANNELS: return if not isinstance(message.channel, TextChannel): return if", "type: ignore if lc: await lc.send( f\"Message from {message.author.mention} has been automatically deleted", "= payload[\"message_id\"] channel_id = payload[\"channel_id\"] await self.bot.http.request( Route( \"DELETE\", \"/channels/{channel_id}/messages/{message_id}\", channel_id=channel_id, message_id=message_id, )", "in environ[\"CHANNELS\"].split(\";\")] LOGS = int(environ[\"LOGS\"]) class AutoMod(Cog): def __init__(self, bot: Bot) -> None:", "Route from src.impl.bot import Bot CHANNELS = [int(c) for c in environ[\"CHANNELS\"].split(\";\")] LOGS", "CHANNELS = [int(c) for c in environ[\"CHANNELS\"].split(\";\")] LOGS = int(environ[\"LOGS\"]) class AutoMod(Cog): def", "if message.channel.id not in CHANNELS: return if not isinstance(message.channel, TextChannel): return if (", "disnake.ext.commands import Cog from disnake.http import Route from src.impl.bot import Bot CHANNELS =", "async def on_message(self, message: Message) -> None: if message.channel.id not in CHANNELS: return", "def on_message(self, message: Message) -> None: if message.channel.id not in CHANNELS: return if", "on_raw_message_edit(self, payload: dict) -> None: if payload[\"channel_id\"] not in CHANNELS: return message_id =", "if not isinstance(message.channel, TextChannel): return if ( message.stickers or message.attachments or message.embeds or", "CHANNELS: return if 
not isinstance(message.channel, TextChannel): return if ( message.stickers or message.attachments or", "bot @Cog.listener() async def on_message(self, message: Message) -> None: if message.channel.id not in", "or message.content != message.channel.name ): await message.delete() lc: TextChannel = self.bot.get_channel(LOGS) # type:", "if lc: await lc.send( f\"Message from {message.author.mention} has been automatically deleted in {message.channel.mention}:\\n\\n{message.content[:1800]}\",", "or message.components or message.content != message.channel.name ): await message.delete() lc: TextChannel = self.bot.get_channel(LOGS)", "import Bot CHANNELS = [int(c) for c in environ[\"CHANNELS\"].split(\";\")] LOGS = int(environ[\"LOGS\"]) class", "-> None: if message.channel.id not in CHANNELS: return if not isinstance(message.channel, TextChannel): return", "bot: Bot) -> None: self.bot = bot @Cog.listener() async def on_message(self, message: Message)", "await message.delete() lc: TextChannel = self.bot.get_channel(LOGS) # type: ignore if lc: await lc.send(", "__init__(self, bot: Bot) -> None: self.bot = bot @Cog.listener() async def on_message(self, message:", "message_id = payload[\"message_id\"] channel_id = payload[\"channel_id\"] await self.bot.http.request( Route( \"DELETE\", \"/channels/{channel_id}/messages/{message_id}\", channel_id=channel_id, message_id=message_id,", "channel_id = payload[\"channel_id\"] await self.bot.http.request( Route( \"DELETE\", \"/channels/{channel_id}/messages/{message_id}\", channel_id=channel_id, message_id=message_id, ) ) def", "disnake.http import Route from src.impl.bot import Bot CHANNELS = [int(c) for c in", "or message.embeds or message.components or message.content != message.channel.name ): await message.delete() lc: TextChannel", "ignore if lc: await lc.send( f\"Message from {message.author.mention} has been automatically deleted in", "deleted in {message.channel.mention}:\\n\\n{message.content[:1800]}\", 
allowed_mentions=AllowedMentions(users=False, roles=False, everyone=False), ) @Cog.listener() async def on_raw_message_edit(self, payload: dict)", "from disnake.http import Route from src.impl.bot import Bot CHANNELS = [int(c) for c", "isinstance(message.channel, TextChannel): return if ( message.stickers or message.attachments or message.embeds or message.components or", "LOGS = int(environ[\"LOGS\"]) class AutoMod(Cog): def __init__(self, bot: Bot) -> None: self.bot =", "allowed_mentions=AllowedMentions(users=False, roles=False, everyone=False), ) @Cog.listener() async def on_raw_message_edit(self, payload: dict) -> None: if", "int(environ[\"LOGS\"]) class AutoMod(Cog): def __init__(self, bot: Bot) -> None: self.bot = bot @Cog.listener()", "if payload[\"channel_id\"] not in CHANNELS: return message_id = payload[\"message_id\"] channel_id = payload[\"channel_id\"] await", "= int(environ[\"LOGS\"]) class AutoMod(Cog): def __init__(self, bot: Bot) -> None: self.bot = bot", "-> None: self.bot = bot @Cog.listener() async def on_message(self, message: Message) -> None:", "return if not isinstance(message.channel, TextChannel): return if ( message.stickers or message.attachments or message.embeds", "lc: await lc.send( f\"Message from {message.author.mention} has been automatically deleted in {message.channel.mention}:\\n\\n{message.content[:1800]}\", allowed_mentions=AllowedMentions(users=False,", "not isinstance(message.channel, TextChannel): return if ( message.stickers or message.attachments or message.embeds or message.components", "AllowedMentions, Message, TextChannel from disnake.ext.commands import Cog from disnake.http import Route from src.impl.bot", "self.bot.http.request( Route( \"DELETE\", \"/channels/{channel_id}/messages/{message_id}\", channel_id=channel_id, message_id=message_id, ) ) def setup(bot: Bot) -> None:", "in CHANNELS: return if not isinstance(message.channel, TextChannel): return if ( message.stickers or message.attachments", "Cog from 
disnake.http import Route from src.impl.bot import Bot CHANNELS = [int(c) for", "self.bot.get_channel(LOGS) # type: ignore if lc: await lc.send( f\"Message from {message.author.mention} has been", "-> None: if payload[\"channel_id\"] not in CHANNELS: return message_id = payload[\"message_id\"] channel_id =", "( message.stickers or message.attachments or message.embeds or message.components or message.content != message.channel.name ):", "from os import environ from disnake import AllowedMentions, Message, TextChannel from disnake.ext.commands import", "= bot @Cog.listener() async def on_message(self, message: Message) -> None: if message.channel.id not", "None: if payload[\"channel_id\"] not in CHANNELS: return message_id = payload[\"message_id\"] channel_id = payload[\"channel_id\"]", "message.stickers or message.attachments or message.embeds or message.components or message.content != message.channel.name ): await", "None: self.bot = bot @Cog.listener() async def on_message(self, message: Message) -> None: if", "message.components or message.content != message.channel.name ): await message.delete() lc: TextChannel = self.bot.get_channel(LOGS) #", "TextChannel = self.bot.get_channel(LOGS) # type: ignore if lc: await lc.send( f\"Message from {message.author.mention}", "{message.channel.mention}:\\n\\n{message.content[:1800]}\", allowed_mentions=AllowedMentions(users=False, roles=False, everyone=False), ) @Cog.listener() async def on_raw_message_edit(self, payload: dict) -> None:", "@Cog.listener() async def on_raw_message_edit(self, payload: dict) -> None: if payload[\"channel_id\"] not in CHANNELS:", "not in CHANNELS: return if not isinstance(message.channel, TextChannel): return if ( message.stickers or", "lc.send( f\"Message from {message.author.mention} has been automatically deleted in {message.channel.mention}:\\n\\n{message.content[:1800]}\", allowed_mentions=AllowedMentions(users=False, roles=False, everyone=False),", "everyone=False), ) @Cog.listener() 
async def on_raw_message_edit(self, payload: dict) -> None: if payload[\"channel_id\"] not", "await lc.send( f\"Message from {message.author.mention} has been automatically deleted in {message.channel.mention}:\\n\\n{message.content[:1800]}\", allowed_mentions=AllowedMentions(users=False, roles=False,", "import Route from src.impl.bot import Bot CHANNELS = [int(c) for c in environ[\"CHANNELS\"].split(\";\")]", "[int(c) for c in environ[\"CHANNELS\"].split(\";\")] LOGS = int(environ[\"LOGS\"]) class AutoMod(Cog): def __init__(self, bot:", "Route( \"DELETE\", \"/channels/{channel_id}/messages/{message_id}\", channel_id=channel_id, message_id=message_id, ) ) def setup(bot: Bot) -> None: bot.add_cog(AutoMod(bot))", "AutoMod(Cog): def __init__(self, bot: Bot) -> None: self.bot = bot @Cog.listener() async def", "Message) -> None: if message.channel.id not in CHANNELS: return if not isinstance(message.channel, TextChannel):" ]
[ "a general post to mah friends!\") sender.send(msg, transport) def random_message(self, transport): sender =", "= weighted_choice(available_actions) if a == 'send_msg': log.debug(\"{} will send a message.\".format(self.peer)) self.random_message(transport) elif", "p1.key) #log.debug('{} and {} are now friends'.format(p1, p2)) def gen_social_graph_1(num_people=10): G=nx.Graph() peers =", "None or recipient == sender: recipient = random.choice(list(sender.friends.values())) msg = Message(sender.address, [recipient.address], MessageTypes.MESSAGE,", "self.peer recipients = set() if not sender.friends: log.debug(\"{} has no friends :-(\".format(sender)) return", "+ random.choice(hosts) def generate_random_peers(number=100): for i in range(0, number): p = Peer(random_address(i)) yield", ":-(\".format(sender)) return while recipient is None or recipient == sender: recipient = random.choice(list(sender.friends.values()))", "import Peer, Friend from tenet.utils import weighted_choice log = logging.getLogger(__name__) class SimulatedPeer(object): def", "= SimulatedPeer(Peer(random_address(n))) for e in G.edges(): p1 = peer_by_id[e[0]] p2 = peer_by_id[e[1]] p1.peer.friends[p2.peer.address]", "import weighted_choice log = logging.getLogger(__name__) class SimulatedPeer(object): def __init__(self, peer): self.peer = peer", "for i in range(0, links): p1 = random.choice(peers) p2 = None while p2", "in generate_random_peers(num_people)] [log.debug(x) for x in peers] for p in peers: G.add_node(p.address) random_friendships([p.peer", "p1 = peer_by_id[e[0]] p2 = peer_by_id[e[1]] p1.peer.friends[p2.peer.address] = Friend(p2.peer.address, p2.peer.key) p2.peer.friends[p1.peer.address] = Friend(p1.peer.address,", "while p2 is None or p1 == p2: p2 = random.choice(peers) G.add_edge(p1.address, p2.address)", "self.peer.connected = True self.peer.on_connect(transport) wait_duration = random.randint(1,4) yield env.timeout(wait_duration) def random_post(self, transport): sender", "needs 
Graphviz and either PyGraphviz or Pydot\") import matplotlib.pyplot as plt plt.figure(1, figsize=(8,8))", "for p in peers], G) return (peers, G) def gen_social_graph_2(num_people=10): G=nx.random_geometric_graph(num_people,0.325) peer_by_id =", "available_actions = [('connect', 1), ('none', 3)] a = weighted_choice(available_actions) if a == 'send_msg':", "self.peer.on_connect(transport) wait_duration = random.randint(1,4) yield env.timeout(wait_duration) def random_post(self, transport): sender = self.peer recipients", "[x for x in generate_random_peers(num_people)] [log.debug(x) for x in peers] for p in", "= Friend(p2.peer.address, p2.peer.key) p2.peer.friends[p1.peer.address] = Friend(p1.peer.address, p1.peer.key) return peer_by_id.values(), G def draw_graph(G): try:", "class SimulatedPeer(object): def __init__(self, peer): self.peer = peer self.connected = True def simulate(self,", "in G.nodes(): peer_by_id[n] = SimulatedPeer(Peer(random_address(n))) for e in G.edges(): p1 = peer_by_id[e[0]] p2", "friends :-(\".format(sender)) return num_recipients = random.randint(1, len(list(sender.friends.values()))) while len(recipients) < num_recipients: r =", "NOTE: maybe simulate offline posts # so that connection behaviour and a sudden", "wait_duration = random.randint(1,4) yield env.timeout(wait_duration) def random_post(self, transport): sender = self.peer recipients =", "for i in range(0, number): p = Peer(random_address(i)) yield SimulatedPeer(p) def random_friendships(peers, G=None,", "p in peers], G) return (peers, G) def gen_social_graph_2(num_people=10): G=nx.random_geometric_graph(num_people,0.325) peer_by_id = {}", "p2 = peer_by_id[e[1]] p1.peer.friends[p2.peer.address] = Friend(p2.peer.address, p2.peer.key) p2.peer.friends[p1.peer.address] = Friend(p1.peer.address, p1.peer.key) return peer_by_id.values(),", "graphviz_layout except ImportError: raise ImportError(\"This example needs Graphviz and either PyGraphviz or Pydot\")", "in peers], G) return (peers, G) 
def gen_social_graph_2(num_people=10): G=nx.random_geometric_graph(num_people,0.325) peer_by_id = {} for", "sender.send(msg, transport) def random_message(self, transport): sender = self.peer recipient = None if not", "== 'friend_post': log.debug(\"{} will make a post.\".format(self.peer)) self.random_post(transport) elif a == 'disconnect': log.info(\"{}", "disconnecting\".format(self.peer)) self.peer.connected = False elif a == 'connect': log.info(\"{} reconnecting\".format(self.peer)) self.peer.connected = True", "sender.friends: log.debug(\"{} has no friends :-(\".format(sender)) return while recipient is None or recipient", "( Message, DictTransport, MessageSerializer, MessageTypes ) from tenet.peer import Peer, Friend from tenet.utils", "ImportError(\"This example needs Graphviz and either PyGraphviz or Pydot\") import matplotlib.pyplot as plt", "tenet.message import ( Message, DictTransport, MessageSerializer, MessageTypes ) from tenet.peer import Peer, Friend", "def draw_graph(G): try: from networkx import graphviz_layout except ImportError: raise ImportError(\"This example needs", "[log.debug(x) for x in peers] for p in peers: G.add_node(p.address) random_friendships([p.peer for p", "general post to mah friends!\") sender.send(msg, transport) def random_message(self, transport): sender = self.peer", "peer_by_id[n] = SimulatedPeer(Peer(random_address(n))) for e in G.edges(): p1 = peer_by_id[e[0]] p2 = peer_by_id[e[1]]", "2)) else: # NOTE: maybe simulate offline posts # so that connection behaviour", "sender.friends: log.debug(\"{} has no friends :-(\".format(sender)) return num_recipients = random.randint(1, len(list(sender.friends.values()))) while len(recipients)", "so that connection behaviour and a sudden egress of messages # doesn't mess", "as plt plt.figure(1, figsize=(8,8)) # layout graphs with positions using graphviz neato #pos=nx.graphviz_layout(G,", "= random.randint(1, len(list(sender.friends.values()))) while len(recipients) < num_recipients: r 
= random.choice(list(sender.friends.values())) recipients.add(r) msg =", "p1.peer.key) return peer_by_id.values(), G def draw_graph(G): try: from networkx import graphviz_layout except ImportError:", "def __init__(self, peer): self.peer = peer self.connected = True def simulate(self, transport, env):", "def random_friendships(peers, G=None, density=0.1): x = len(peers) links = int(x*x*density) for i in", "p2.peer.key) p2.peer.friends[p1.peer.address] = Friend(p1.peer.address, p1.peer.key) return peer_by_id.values(), G def draw_graph(G): try: from networkx", "G) def gen_social_graph_2(num_people=10): G=nx.random_geometric_graph(num_people,0.325) peer_by_id = {} for n in G.nodes(): peer_by_id[n] =", "exchange keys too p1.friends[p2.address] = Friend(p2.address, p2.key) p2.friends[p1.address] = Friend(p1.address, p1.key) #log.debug('{} and", "list(actions) if self.peer.connected: available_actions.append(('disconnect', 2)) else: # NOTE: maybe simulate offline posts #", "available_actions = list(actions) if self.peer.connected: available_actions.append(('disconnect', 2)) else: # NOTE: maybe simulate offline", "i in range(0, links): p1 = random.choice(peers) p2 = None while p2 is", "else: # NOTE: maybe simulate offline posts # so that connection behaviour and", "a message.\".format(self.peer)) self.random_message(transport) elif a == 'friend_post': log.debug(\"{} will make a post.\".format(self.peer)) self.random_post(transport)", "random_message(self, transport): sender = self.peer recipient = None if not sender.friends: log.debug(\"{} has", "or recipient == sender: recipient = random.choice(list(sender.friends.values())) msg = Message(sender.address, [recipient.address], MessageTypes.MESSAGE, text=\"Hello", "recipients = set() if not sender.friends: log.debug(\"{} has no friends :-(\".format(sender)) return num_recipients", "= len(peers) links = int(x*x*density) for i in range(0, links): p1 = random.choice(peers)", "matplotlib.pyplot as plt plt.figure(1, 
figsize=(8,8)) # layout graphs with positions using graphviz neato", "simulate(self, transport, env): actions = [('friend_post', 4), ('send_msg', 4)] while True: available_actions =", "random_post(self, transport): sender = self.peer recipients = set() if not sender.friends: log.debug(\"{} has", "either PyGraphviz or Pydot\") import matplotlib.pyplot as plt plt.figure(1, figsize=(8,8)) # layout graphs", "messages # doesn't mess things up available_actions = [('connect', 1), ('none', 3)] a", "= [('connect', 1), ('none', 3)] a = weighted_choice(available_actions) if a == 'send_msg': log.debug(\"{}", "int(x*x*density) for i in range(0, links): p1 = random.choice(peers) p2 = None while", "= random.choice(peers) p2 = None while p2 is None or p1 == p2:", "using graphviz neato #pos=nx.graphviz_layout(G, prog=\"neato\") pos=nx.get_node_attributes(G,'pos') nx.draw_networkx_edges(G,pos,alpha=0.4) nx.draw_networkx_nodes(G,pos, node_size=80, cmap=plt.cm.Reds_r) #nx.draw(G, #pos, #node_size=40,", "tenet.utils import weighted_choice log = logging.getLogger(__name__) class SimulatedPeer(object): def __init__(self, peer): self.peer =", "egress of messages # doesn't mess things up available_actions = [('connect', 1), ('none',", "{}!\".format(recipient)) sender.send(msg, transport) def random_address(i): names = ['Ariel', 'Boris', 'Carrie', 'Daniel', 'Ezekiel', 'Fiona',", "and either PyGraphviz or Pydot\") import matplotlib.pyplot as plt plt.figure(1, figsize=(8,8)) # layout", "random.randint(1,4) yield env.timeout(wait_duration) def random_post(self, transport): sender = self.peer recipients = set() if", "is None or recipient == sender: recipient = random.choice(list(sender.friends.values())) msg = Message(sender.address, [recipient.address],", "a post.\".format(self.peer)) self.random_post(transport) elif a == 'disconnect': log.info(\"{} disconnecting\".format(self.peer)) self.peer.connected = False elif", "doesn't mess things up available_actions = [('connect', 1), ('none', 3)] a 
= weighted_choice(available_actions)", "text=\"Hello {}!\".format(recipient)) sender.send(msg, transport) def random_address(i): names = ['Ariel', 'Boris', 'Carrie', 'Daniel', 'Ezekiel',", "{} for n in G.nodes(): peer_by_id[n] = SimulatedPeer(Peer(random_address(n))) for e in G.edges(): p1", "p1.friends[p2.address] = Friend(p2.address, p2.key) p2.friends[p1.address] = Friend(p1.address, p1.key) #log.debug('{} and {} are now", "log.info(\"{} reconnecting\".format(self.peer)) self.peer.connected = True self.peer.on_connect(transport) wait_duration = random.randint(1,4) yield env.timeout(wait_duration) def random_post(self,", "SimulatedPeer(p) def random_friendships(peers, G=None, density=0.1): x = len(peers) links = int(x*x*density) for i", "up available_actions = [('connect', 1), ('none', 3)] a = weighted_choice(available_actions) if a ==", "def simulate(self, transport, env): actions = [('friend_post', 4), ('send_msg', 4)] while True: available_actions", "MessageTypes.SHARE, text=\"This is a general post to mah friends!\") sender.send(msg, transport) def random_message(self,", "+ '_' + str(i) + '@' + random.choice(hosts) def generate_random_peers(number=100): for i in", "random.choice(list(sender.friends.values())) recipients.add(r) msg = Message(sender.address, [r.address for r in recipients], MessageTypes.SHARE, text=\"This is", "'Harold', 'Indiana'] hosts = ['example.com', 'gmail.com', 'robot.com', 'zombo.com', 'yahoo.com', 'geocities.com'] return random.choice(names) +", "post.\".format(self.peer)) self.random_post(transport) elif a == 'disconnect': log.info(\"{} disconnecting\".format(self.peer)) self.peer.connected = False elif a", "['Ariel', 'Boris', 'Carrie', 'Daniel', 'Ezekiel', 'Fiona', 'Harold', 'Indiana'] hosts = ['example.com', 'gmail.com', 'robot.com',", "return random.choice(names) + '_' + str(i) + '@' + random.choice(hosts) def generate_random_peers(number=100): for", "log.debug(\"{} has no friends :-(\".format(sender)) return num_recipients = 
random.randint(1, len(list(sender.friends.values()))) while len(recipients) <", "p2: p2 = random.choice(peers) G.add_edge(p1.address, p2.address) # TODO exchange keys too p1.friends[p2.address] =", "Pydot\") import matplotlib.pyplot as plt plt.figure(1, figsize=(8,8)) # layout graphs with positions using", "message.\".format(self.peer)) self.random_message(transport) elif a == 'friend_post': log.debug(\"{} will make a post.\".format(self.peer)) self.random_post(transport) elif", "x = len(peers) links = int(x*x*density) for i in range(0, links): p1 =", "self.peer = peer self.connected = True def simulate(self, transport, env): actions = [('friend_post',", "p1 = random.choice(peers) p2 = None while p2 is None or p1 ==", "len(peers) links = int(x*x*density) for i in range(0, links): p1 = random.choice(peers) p2", "random_address(i): names = ['Ariel', 'Boris', 'Carrie', 'Daniel', 'Ezekiel', 'Fiona', 'Harold', 'Indiana'] hosts =", "gen_social_graph_1(num_people=10): G=nx.Graph() peers = [x for x in generate_random_peers(num_people)] [log.debug(x) for x in", "'Fiona', 'Harold', 'Indiana'] hosts = ['example.com', 'gmail.com', 'robot.com', 'zombo.com', 'yahoo.com', 'geocities.com'] return random.choice(names)", "a == 'connect': log.info(\"{} reconnecting\".format(self.peer)) self.peer.connected = True self.peer.on_connect(transport) wait_duration = random.randint(1,4) yield", "draw_graph(G): try: from networkx import graphviz_layout except ImportError: raise ImportError(\"This example needs Graphviz", "a == 'disconnect': log.info(\"{} disconnecting\".format(self.peer)) self.peer.connected = False elif a == 'connect': log.info(\"{}", "sender = self.peer recipients = set() if not sender.friends: log.debug(\"{} has no friends", "self.peer recipient = None if not sender.friends: log.debug(\"{} has no friends :-(\".format(sender)) return", "recipients.add(r) msg = Message(sender.address, [r.address for r in recipients], MessageTypes.SHARE, text=\"This is a", "msg = 
Message(sender.address, [recipient.address], MessageTypes.MESSAGE, text=\"Hello {}!\".format(recipient)) sender.send(msg, transport) def random_address(i): names =", "# layout graphs with positions using graphviz neato #pos=nx.graphviz_layout(G, prog=\"neato\") pos=nx.get_node_attributes(G,'pos') nx.draw_networkx_edges(G,pos,alpha=0.4) nx.draw_networkx_nodes(G,pos,", "= False elif a == 'connect': log.info(\"{} reconnecting\".format(self.peer)) self.peer.connected = True self.peer.on_connect(transport) wait_duration", "log.debug(\"{} has no friends :-(\".format(sender)) return while recipient is None or recipient ==", "'zombo.com', 'yahoo.com', 'geocities.com'] return random.choice(names) + '_' + str(i) + '@' + random.choice(hosts)", "1), ('none', 3)] a = weighted_choice(available_actions) if a == 'send_msg': log.debug(\"{} will send", "'gmail.com', 'robot.com', 'zombo.com', 'yahoo.com', 'geocities.com'] return random.choice(names) + '_' + str(i) + '@'", "example needs Graphviz and either PyGraphviz or Pydot\") import matplotlib.pyplot as plt plt.figure(1,", "figsize=(8,8)) # layout graphs with positions using graphviz neato #pos=nx.graphviz_layout(G, prog=\"neato\") pos=nx.get_node_attributes(G,'pos') nx.draw_networkx_edges(G,pos,alpha=0.4)", "Peer(random_address(i)) yield SimulatedPeer(p) def random_friendships(peers, G=None, density=0.1): x = len(peers) links = int(x*x*density)", "log.debug(\"{} will send a message.\".format(self.peer)) self.random_message(transport) elif a == 'friend_post': log.debug(\"{} will make", "G) return (peers, G) def gen_social_graph_2(num_people=10): G=nx.random_geometric_graph(num_people,0.325) peer_by_id = {} for n in", "SimulatedPeer(Peer(random_address(n))) for e in G.edges(): p1 = peer_by_id[e[0]] p2 = peer_by_id[e[1]] p1.peer.friends[p2.peer.address] =", "prog=\"neato\") pos=nx.get_node_attributes(G,'pos') nx.draw_networkx_edges(G,pos,alpha=0.4) nx.draw_networkx_nodes(G,pos, node_size=80, cmap=plt.cm.Reds_r) #nx.draw(G, #pos, 
#node_size=40, ##node_color=c, #vmin=0.0, #vmax=1.0, #with_labels=False", "from networkx import graphviz_layout except ImportError: raise ImportError(\"This example needs Graphviz and either", "for x in generate_random_peers(num_people)] [log.debug(x) for x in peers] for p in peers:", "G.nodes(): peer_by_id[n] = SimulatedPeer(Peer(random_address(n))) for e in G.edges(): p1 = peer_by_id[e[0]] p2 =", "in G.edges(): p1 = peer_by_id[e[0]] p2 = peer_by_id[e[1]] p1.peer.friends[p2.peer.address] = Friend(p2.peer.address, p2.peer.key) p2.peer.friends[p1.peer.address]", "peers] for p in peers: G.add_node(p.address) random_friendships([p.peer for p in peers], G) return", "and {} are now friends'.format(p1, p2)) def gen_social_graph_1(num_people=10): G=nx.Graph() peers = [x for", "if a == 'send_msg': log.debug(\"{} will send a message.\".format(self.peer)) self.random_message(transport) elif a ==", "in recipients], MessageTypes.SHARE, text=\"This is a general post to mah friends!\") sender.send(msg, transport)", "return peer_by_id.values(), G def draw_graph(G): try: from networkx import graphviz_layout except ImportError: raise", "ImportError: raise ImportError(\"This example needs Graphviz and either PyGraphviz or Pydot\") import matplotlib.pyplot", "Friend(p2.address, p2.key) p2.friends[p1.address] = Friend(p1.address, p1.key) #log.debug('{} and {} are now friends'.format(p1, p2))", "available_actions.append(('disconnect', 2)) else: # NOTE: maybe simulate offline posts # so that connection", "raise ImportError(\"This example needs Graphviz and either PyGraphviz or Pydot\") import matplotlib.pyplot as", "yield SimulatedPeer(p) def random_friendships(peers, G=None, density=0.1): x = len(peers) links = int(x*x*density) for", "too p1.friends[p2.address] = Friend(p2.address, p2.key) p2.friends[p1.address] = Friend(p1.address, p1.key) #log.debug('{} and {} are", "from tenet.peer import Peer, Friend from tenet.utils import weighted_choice log = logging.getLogger(__name__) class", 
"peer): self.peer = peer self.connected = True def simulate(self, transport, env): actions =", "simulate offline posts # so that connection behaviour and a sudden egress of", "'send_msg': log.debug(\"{} will send a message.\".format(self.peer)) self.random_message(transport) elif a == 'friend_post': log.debug(\"{} will", "p2 = None while p2 is None or p1 == p2: p2 =", "def random_address(i): names = ['Ariel', 'Boris', 'Carrie', 'Daniel', 'Ezekiel', 'Fiona', 'Harold', 'Indiana'] hosts", "a == 'send_msg': log.debug(\"{} will send a message.\".format(self.peer)) self.random_message(transport) elif a == 'friend_post':", "Friend(p1.peer.address, p1.peer.key) return peer_by_id.values(), G def draw_graph(G): try: from networkx import graphviz_layout except", "random.randint(1, len(list(sender.friends.values()))) while len(recipients) < num_recipients: r = random.choice(list(sender.friends.values())) recipients.add(r) msg = Message(sender.address,", "self.peer.connected = False elif a == 'connect': log.info(\"{} reconnecting\".format(self.peer)) self.peer.connected = True self.peer.on_connect(transport)", "True: available_actions = list(actions) if self.peer.connected: available_actions.append(('disconnect', 2)) else: # NOTE: maybe simulate", "in peers] for p in peers: G.add_node(p.address) random_friendships([p.peer for p in peers], G)", "generate_random_peers(num_people)] [log.debug(x) for x in peers] for p in peers: G.add_node(p.address) random_friendships([p.peer for", "transport): sender = self.peer recipient = None if not sender.friends: log.debug(\"{} has no", "= None if not sender.friends: log.debug(\"{} has no friends :-(\".format(sender)) return while recipient", "number): p = Peer(random_address(i)) yield SimulatedPeer(p) def random_friendships(peers, G=None, density=0.1): x = len(peers)", "plt.figure(1, figsize=(8,8)) # layout graphs with positions using graphviz neato #pos=nx.graphviz_layout(G, prog=\"neato\") pos=nx.get_node_attributes(G,'pos')", "friends!\") 
sender.send(msg, transport) def random_message(self, transport): sender = self.peer recipient = None if", "from tenet.utils import weighted_choice log = logging.getLogger(__name__) class SimulatedPeer(object): def __init__(self, peer): self.peer", "'Carrie', 'Daniel', 'Ezekiel', 'Fiona', 'Harold', 'Indiana'] hosts = ['example.com', 'gmail.com', 'robot.com', 'zombo.com', 'yahoo.com',", "a = weighted_choice(available_actions) if a == 'send_msg': log.debug(\"{} will send a message.\".format(self.peer)) self.random_message(transport)", "[r.address for r in recipients], MessageTypes.SHARE, text=\"This is a general post to mah", "not sender.friends: log.debug(\"{} has no friends :-(\".format(sender)) return while recipient is None or", "+ '@' + random.choice(hosts) def generate_random_peers(number=100): for i in range(0, number): p =", "a == 'friend_post': log.debug(\"{} will make a post.\".format(self.peer)) self.random_post(transport) elif a == 'disconnect':", "+ str(i) + '@' + random.choice(hosts) def generate_random_peers(number=100): for i in range(0, number):", "def gen_social_graph_2(num_people=10): G=nx.random_geometric_graph(num_people,0.325) peer_by_id = {} for n in G.nodes(): peer_by_id[n] = SimulatedPeer(Peer(random_address(n)))", "import networkx as nx from tenet.message import ( Message, DictTransport, MessageSerializer, MessageTypes )", "__init__(self, peer): self.peer = peer self.connected = True def simulate(self, transport, env): actions", "len(recipients) < num_recipients: r = random.choice(list(sender.friends.values())) recipients.add(r) msg = Message(sender.address, [r.address for r", "p2.peer.friends[p1.peer.address] = Friend(p1.peer.address, p1.peer.key) return peer_by_id.values(), G def draw_graph(G): try: from networkx import", "recipient == sender: recipient = random.choice(list(sender.friends.values())) msg = Message(sender.address, [recipient.address], MessageTypes.MESSAGE, text=\"Hello {}!\".format(recipient))", "x in 
generate_random_peers(num_people)] [log.debug(x) for x in peers] for p in peers: G.add_node(p.address)", "== sender: recipient = random.choice(list(sender.friends.values())) msg = Message(sender.address, [recipient.address], MessageTypes.MESSAGE, text=\"Hello {}!\".format(recipient)) sender.send(msg,", "self.random_post(transport) elif a == 'disconnect': log.info(\"{} disconnecting\".format(self.peer)) self.peer.connected = False elif a ==", "as nx from tenet.message import ( Message, DictTransport, MessageSerializer, MessageTypes ) from tenet.peer", "yield env.timeout(wait_duration) def random_post(self, transport): sender = self.peer recipients = set() if not", "Friend(p1.address, p1.key) #log.debug('{} and {} are now friends'.format(p1, p2)) def gen_social_graph_1(num_people=10): G=nx.Graph() peers", "G def draw_graph(G): try: from networkx import graphviz_layout except ImportError: raise ImportError(\"This example", "will send a message.\".format(self.peer)) self.random_message(transport) elif a == 'friend_post': log.debug(\"{} will make a", "networkx as nx from tenet.message import ( Message, DictTransport, MessageSerializer, MessageTypes ) from", "r in recipients], MessageTypes.SHARE, text=\"This is a general post to mah friends!\") sender.send(msg,", "recipient = None if not sender.friends: log.debug(\"{} has no friends :-(\".format(sender)) return while", "[('friend_post', 4), ('send_msg', 4)] while True: available_actions = list(actions) if self.peer.connected: available_actions.append(('disconnect', 2))", "'connect': log.info(\"{} reconnecting\".format(self.peer)) self.peer.connected = True self.peer.on_connect(transport) wait_duration = random.randint(1,4) yield env.timeout(wait_duration) def", "True def simulate(self, transport, env): actions = [('friend_post', 4), ('send_msg', 4)] while True:", ":-(\".format(sender)) return num_recipients = random.randint(1, len(list(sender.friends.values()))) while len(recipients) < num_recipients: r = 
random.choice(list(sender.friends.values()))", "peers = [x for x in generate_random_peers(num_people)] [log.debug(x) for x in peers] for", "= Friend(p1.address, p1.key) #log.debug('{} and {} are now friends'.format(p1, p2)) def gen_social_graph_1(num_people=10): G=nx.Graph()", "layout graphs with positions using graphviz neato #pos=nx.graphviz_layout(G, prog=\"neato\") pos=nx.get_node_attributes(G,'pos') nx.draw_networkx_edges(G,pos,alpha=0.4) nx.draw_networkx_nodes(G,pos, node_size=80,", "'Indiana'] hosts = ['example.com', 'gmail.com', 'robot.com', 'zombo.com', 'yahoo.com', 'geocities.com'] return random.choice(names) + '_'", "Message(sender.address, [r.address for r in recipients], MessageTypes.SHARE, text=\"This is a general post to", "'_' + str(i) + '@' + random.choice(hosts) def generate_random_peers(number=100): for i in range(0,", "for r in recipients], MessageTypes.SHARE, text=\"This is a general post to mah friends!\")", "recipient = random.choice(list(sender.friends.values())) msg = Message(sender.address, [recipient.address], MessageTypes.MESSAGE, text=\"Hello {}!\".format(recipient)) sender.send(msg, transport) def", "gen_social_graph_2(num_people=10): G=nx.random_geometric_graph(num_people,0.325) peer_by_id = {} for n in G.nodes(): peer_by_id[n] = SimulatedPeer(Peer(random_address(n))) for", "logging import networkx as nx from tenet.message import ( Message, DictTransport, MessageSerializer, MessageTypes", "p2 is None or p1 == p2: p2 = random.choice(peers) G.add_edge(p1.address, p2.address) #", "recipient is None or recipient == sender: recipient = random.choice(list(sender.friends.values())) msg = Message(sender.address,", "to mah friends!\") sender.send(msg, transport) def random_message(self, transport): sender = self.peer recipient =", "sender: recipient = random.choice(list(sender.friends.values())) msg = Message(sender.address, [recipient.address], MessageTypes.MESSAGE, text=\"Hello {}!\".format(recipient)) sender.send(msg, transport)", "in 
range(0, number): p = Peer(random_address(i)) yield SimulatedPeer(p) def random_friendships(peers, G=None, density=0.1): x", "== p2: p2 = random.choice(peers) G.add_edge(p1.address, p2.address) # TODO exchange keys too p1.friends[p2.address]", "'geocities.com'] return random.choice(names) + '_' + str(i) + '@' + random.choice(hosts) def generate_random_peers(number=100):", "'Daniel', 'Ezekiel', 'Fiona', 'Harold', 'Indiana'] hosts = ['example.com', 'gmail.com', 'robot.com', 'zombo.com', 'yahoo.com', 'geocities.com']", "friends'.format(p1, p2)) def gen_social_graph_1(num_people=10): G=nx.Graph() peers = [x for x in generate_random_peers(num_people)] [log.debug(x)", "elif a == 'connect': log.info(\"{} reconnecting\".format(self.peer)) self.peer.connected = True self.peer.on_connect(transport) wait_duration = random.randint(1,4)", "(peers, G) def gen_social_graph_2(num_people=10): G=nx.random_geometric_graph(num_people,0.325) peer_by_id = {} for n in G.nodes(): peer_by_id[n]", "= Friend(p1.peer.address, p1.peer.key) return peer_by_id.values(), G def draw_graph(G): try: from networkx import graphviz_layout", "log.info(\"{} disconnecting\".format(self.peer)) self.peer.connected = False elif a == 'connect': log.info(\"{} reconnecting\".format(self.peer)) self.peer.connected =", "log = logging.getLogger(__name__) class SimulatedPeer(object): def __init__(self, peer): self.peer = peer self.connected =", "p1 == p2: p2 = random.choice(peers) G.add_edge(p1.address, p2.address) # TODO exchange keys too", "MessageTypes ) from tenet.peer import Peer, Friend from tenet.utils import weighted_choice log =", "G=None, density=0.1): x = len(peers) links = int(x*x*density) for i in range(0, links):", "# TODO exchange keys too p1.friends[p2.address] = Friend(p2.address, p2.key) p2.friends[p1.address] = Friend(p1.address, p1.key)", "for x in peers] for p in peers: G.add_node(p.address) random_friendships([p.peer for p in", "#log.debug('{} and {} are now friends'.format(p1, p2)) def 
gen_social_graph_1(num_people=10): G=nx.Graph() peers = [x", "links = int(x*x*density) for i in range(0, links): p1 = random.choice(peers) p2 =", "G.add_edge(p1.address, p2.address) # TODO exchange keys too p1.friends[p2.address] = Friend(p2.address, p2.key) p2.friends[p1.address] =", "r = random.choice(list(sender.friends.values())) recipients.add(r) msg = Message(sender.address, [r.address for r in recipients], MessageTypes.SHARE,", "reconnecting\".format(self.peer)) self.peer.connected = True self.peer.on_connect(transport) wait_duration = random.randint(1,4) yield env.timeout(wait_duration) def random_post(self, transport):", "False elif a == 'connect': log.info(\"{} reconnecting\".format(self.peer)) self.peer.connected = True self.peer.on_connect(transport) wait_duration =", "while len(recipients) < num_recipients: r = random.choice(list(sender.friends.values())) recipients.add(r) msg = Message(sender.address, [r.address for", "logging.getLogger(__name__) class SimulatedPeer(object): def __init__(self, peer): self.peer = peer self.connected = True def", "p2.key) p2.friends[p1.address] = Friend(p1.address, p1.key) #log.debug('{} and {} are now friends'.format(p1, p2)) def", "while True: available_actions = list(actions) if self.peer.connected: available_actions.append(('disconnect', 2)) else: # NOTE: maybe", "peers], G) return (peers, G) def gen_social_graph_2(num_people=10): G=nx.random_geometric_graph(num_people,0.325) peer_by_id = {} for n", "return num_recipients = random.randint(1, len(list(sender.friends.values()))) while len(recipients) < num_recipients: r = random.choice(list(sender.friends.values())) recipients.add(r)", "post to mah friends!\") sender.send(msg, transport) def random_message(self, transport): sender = self.peer recipient", "env): actions = [('friend_post', 4), ('send_msg', 4)] while True: available_actions = list(actions) if", "random import logging import networkx as nx from tenet.message import ( Message, DictTransport,", "and a 
sudden egress of messages # doesn't mess things up available_actions =", "behaviour and a sudden egress of messages # doesn't mess things up available_actions", "num_recipients = random.randint(1, len(list(sender.friends.values()))) while len(recipients) < num_recipients: r = random.choice(list(sender.friends.values())) recipients.add(r) msg", "peer_by_id[e[1]] p1.peer.friends[p2.peer.address] = Friend(p2.peer.address, p2.peer.key) p2.peer.friends[p1.peer.address] = Friend(p1.peer.address, p1.peer.key) return peer_by_id.values(), G def", "G=nx.Graph() peers = [x for x in generate_random_peers(num_people)] [log.debug(x) for x in peers]", "[('connect', 1), ('none', 3)] a = weighted_choice(available_actions) if a == 'send_msg': log.debug(\"{} will", "peer_by_id = {} for n in G.nodes(): peer_by_id[n] = SimulatedPeer(Peer(random_address(n))) for e in", "import logging import networkx as nx from tenet.message import ( Message, DictTransport, MessageSerializer,", "transport, env): actions = [('friend_post', 4), ('send_msg', 4)] while True: available_actions = list(actions)", "self.peer.connected: available_actions.append(('disconnect', 2)) else: # NOTE: maybe simulate offline posts # so that", "MessageTypes.MESSAGE, text=\"Hello {}!\".format(recipient)) sender.send(msg, transport) def random_address(i): names = ['Ariel', 'Boris', 'Carrie', 'Daniel',", "p2.address) # TODO exchange keys too p1.friends[p2.address] = Friend(p2.address, p2.key) p2.friends[p1.address] = Friend(p1.address,", "or p1 == p2: p2 = random.choice(peers) G.add_edge(p1.address, p2.address) # TODO exchange keys", "None while p2 is None or p1 == p2: p2 = random.choice(peers) G.add_edge(p1.address,", "peer_by_id.values(), G def draw_graph(G): try: from networkx import graphviz_layout except ImportError: raise ImportError(\"This", "random.choice(peers) G.add_edge(p1.address, p2.address) # TODO exchange keys too p1.friends[p2.address] = Friend(p2.address, p2.key) p2.friends[p1.address]", "'yahoo.com', 
'geocities.com'] return random.choice(names) + '_' + str(i) + '@' + random.choice(hosts) def", "= [('friend_post', 4), ('send_msg', 4)] while True: available_actions = list(actions) if self.peer.connected: available_actions.append(('disconnect',", "= set() if not sender.friends: log.debug(\"{} has no friends :-(\".format(sender)) return num_recipients =", "num_recipients: r = random.choice(list(sender.friends.values())) recipients.add(r) msg = Message(sender.address, [r.address for r in recipients],", "= peer self.connected = True def simulate(self, transport, env): actions = [('friend_post', 4),", "mess things up available_actions = [('connect', 1), ('none', 3)] a = weighted_choice(available_actions) if", "random_friendships(peers, G=None, density=0.1): x = len(peers) links = int(x*x*density) for i in range(0,", "= int(x*x*density) for i in range(0, links): p1 = random.choice(peers) p2 = None", "['example.com', 'gmail.com', 'robot.com', 'zombo.com', 'yahoo.com', 'geocities.com'] return random.choice(names) + '_' + str(i) +", "return while recipient is None or recipient == sender: recipient = random.choice(list(sender.friends.values())) msg", "transport) def random_message(self, transport): sender = self.peer recipient = None if not sender.friends:", "graphviz neato #pos=nx.graphviz_layout(G, prog=\"neato\") pos=nx.get_node_attributes(G,'pos') nx.draw_networkx_edges(G,pos,alpha=0.4) nx.draw_networkx_nodes(G,pos, node_size=80, cmap=plt.cm.Reds_r) #nx.draw(G, #pos, #node_size=40, ##node_color=c,", "def gen_social_graph_1(num_people=10): G=nx.Graph() peers = [x for x in generate_random_peers(num_people)] [log.debug(x) for x", "has no friends :-(\".format(sender)) return while recipient is None or recipient == sender:", "weighted_choice(available_actions) if a == 'send_msg': log.debug(\"{} will send a message.\".format(self.peer)) self.random_message(transport) elif a", "MessageSerializer, MessageTypes ) from tenet.peer import Peer, Friend from tenet.utils import 
weighted_choice log", "a sudden egress of messages # doesn't mess things up available_actions = [('connect',", "import random import logging import networkx as nx from tenet.message import ( Message,", "Friend from tenet.utils import weighted_choice log = logging.getLogger(__name__) class SimulatedPeer(object): def __init__(self, peer):", "things up available_actions = [('connect', 1), ('none', 3)] a = weighted_choice(available_actions) if a", "msg = Message(sender.address, [r.address for r in recipients], MessageTypes.SHARE, text=\"This is a general", "text=\"This is a general post to mah friends!\") sender.send(msg, transport) def random_message(self, transport):", "n in G.nodes(): peer_by_id[n] = SimulatedPeer(Peer(random_address(n))) for e in G.edges(): p1 = peer_by_id[e[0]]", "range(0, number): p = Peer(random_address(i)) yield SimulatedPeer(p) def random_friendships(peers, G=None, density=0.1): x =", "random.choice(peers) p2 = None while p2 is None or p1 == p2: p2", "== 'disconnect': log.info(\"{} disconnecting\".format(self.peer)) self.peer.connected = False elif a == 'connect': log.info(\"{} reconnecting\".format(self.peer))", "'friend_post': log.debug(\"{} will make a post.\".format(self.peer)) self.random_post(transport) elif a == 'disconnect': log.info(\"{} disconnecting\".format(self.peer))", "of messages # doesn't mess things up available_actions = [('connect', 1), ('none', 3)]", "density=0.1): x = len(peers) links = int(x*x*density) for i in range(0, links): p1", "self.connected = True def simulate(self, transport, env): actions = [('friend_post', 4), ('send_msg', 4)]", "is a general post to mah friends!\") sender.send(msg, transport) def random_message(self, transport): sender", "is None or p1 == p2: p2 = random.choice(peers) G.add_edge(p1.address, p2.address) # TODO", "hosts = ['example.com', 'gmail.com', 'robot.com', 'zombo.com', 'yahoo.com', 'geocities.com'] return random.choice(names) + '_' +", "= None while p2 is None or p1 == p2: p2 = 
random.choice(peers)", "import matplotlib.pyplot as plt plt.figure(1, figsize=(8,8)) # layout graphs with positions using graphviz", "= random.choice(list(sender.friends.values())) msg = Message(sender.address, [recipient.address], MessageTypes.MESSAGE, text=\"Hello {}!\".format(recipient)) sender.send(msg, transport) def random_address(i):", "True self.peer.on_connect(transport) wait_duration = random.randint(1,4) yield env.timeout(wait_duration) def random_post(self, transport): sender = self.peer", "None or p1 == p2: p2 = random.choice(peers) G.add_edge(p1.address, p2.address) # TODO exchange", "def random_post(self, transport): sender = self.peer recipients = set() if not sender.friends: log.debug(\"{}", "= ['example.com', 'gmail.com', 'robot.com', 'zombo.com', 'yahoo.com', 'geocities.com'] return random.choice(names) + '_' + str(i)", "nx.draw_networkx_edges(G,pos,alpha=0.4) nx.draw_networkx_nodes(G,pos, node_size=80, cmap=plt.cm.Reds_r) #nx.draw(G, #pos, #node_size=40, ##node_color=c, #vmin=0.0, #vmax=1.0, #with_labels=False #) plt.savefig(\"tenet.png\",dpi=75)", "SimulatedPeer(object): def __init__(self, peer): self.peer = peer self.connected = True def simulate(self, transport,", "p = Peer(random_address(i)) yield SimulatedPeer(p) def random_friendships(peers, G=None, density=0.1): x = len(peers) links", "elif a == 'friend_post': log.debug(\"{} will make a post.\".format(self.peer)) self.random_post(transport) elif a ==", "DictTransport, MessageSerializer, MessageTypes ) from tenet.peer import Peer, Friend from tenet.utils import weighted_choice", "p1.peer.friends[p2.peer.address] = Friend(p2.peer.address, p2.peer.key) p2.peer.friends[p1.peer.address] = Friend(p1.peer.address, p1.peer.key) return peer_by_id.values(), G def draw_graph(G):", "sudden egress of messages # doesn't mess things up available_actions = [('connect', 1),", "sender.send(msg, transport) def random_address(i): names = ['Ariel', 'Boris', 'Carrie', 'Daniel', 'Ezekiel', 'Fiona', 'Harold',", 
"import graphviz_layout except ImportError: raise ImportError(\"This example needs Graphviz and either PyGraphviz or", "3)] a = weighted_choice(available_actions) if a == 'send_msg': log.debug(\"{} will send a message.\".format(self.peer))", "('none', 3)] a = weighted_choice(available_actions) if a == 'send_msg': log.debug(\"{} will send a", "has no friends :-(\".format(sender)) return num_recipients = random.randint(1, len(list(sender.friends.values()))) while len(recipients) < num_recipients:", "Message(sender.address, [recipient.address], MessageTypes.MESSAGE, text=\"Hello {}!\".format(recipient)) sender.send(msg, transport) def random_address(i): names = ['Ariel', 'Boris',", "G.edges(): p1 = peer_by_id[e[0]] p2 = peer_by_id[e[1]] p1.peer.friends[p2.peer.address] = Friend(p2.peer.address, p2.peer.key) p2.peer.friends[p1.peer.address] =", "[recipient.address], MessageTypes.MESSAGE, text=\"Hello {}!\".format(recipient)) sender.send(msg, transport) def random_address(i): names = ['Ariel', 'Boris', 'Carrie',", "if self.peer.connected: available_actions.append(('disconnect', 2)) else: # NOTE: maybe simulate offline posts # so", "'disconnect': log.info(\"{} disconnecting\".format(self.peer)) self.peer.connected = False elif a == 'connect': log.info(\"{} reconnecting\".format(self.peer)) self.peer.connected", "= Message(sender.address, [recipient.address], MessageTypes.MESSAGE, text=\"Hello {}!\".format(recipient)) sender.send(msg, transport) def random_address(i): names = ['Ariel',", "friends :-(\".format(sender)) return while recipient is None or recipient == sender: recipient =", "from tenet.message import ( Message, DictTransport, MessageSerializer, MessageTypes ) from tenet.peer import Peer,", "now friends'.format(p1, p2)) def gen_social_graph_1(num_people=10): G=nx.Graph() peers = [x for x in generate_random_peers(num_people)]", "with positions using graphviz neato #pos=nx.graphviz_layout(G, prog=\"neato\") pos=nx.get_node_attributes(G,'pos') 
nx.draw_networkx_edges(G,pos,alpha=0.4) nx.draw_networkx_nodes(G,pos, node_size=80, cmap=plt.cm.Reds_r) #nx.draw(G,", "sender = self.peer recipient = None if not sender.friends: log.debug(\"{} has no friends", "G=nx.random_geometric_graph(num_people,0.325) peer_by_id = {} for n in G.nodes(): peer_by_id[n] = SimulatedPeer(Peer(random_address(n))) for e", "self.random_message(transport) elif a == 'friend_post': log.debug(\"{} will make a post.\".format(self.peer)) self.random_post(transport) elif a", "maybe simulate offline posts # so that connection behaviour and a sudden egress", "make a post.\".format(self.peer)) self.random_post(transport) elif a == 'disconnect': log.info(\"{} disconnecting\".format(self.peer)) self.peer.connected = False", "Friend(p2.peer.address, p2.peer.key) p2.peer.friends[p1.peer.address] = Friend(p1.peer.address, p1.peer.key) return peer_by_id.values(), G def draw_graph(G): try: from", "positions using graphviz neato #pos=nx.graphviz_layout(G, prog=\"neato\") pos=nx.get_node_attributes(G,'pos') nx.draw_networkx_edges(G,pos,alpha=0.4) nx.draw_networkx_nodes(G,pos, node_size=80, cmap=plt.cm.Reds_r) #nx.draw(G, #pos,", "= Friend(p2.address, p2.key) p2.friends[p1.address] = Friend(p1.address, p1.key) #log.debug('{} and {} are now friends'.format(p1,", "networkx import graphviz_layout except ImportError: raise ImportError(\"This example needs Graphviz and either PyGraphviz", "#pos=nx.graphviz_layout(G, prog=\"neato\") pos=nx.get_node_attributes(G,'pos') nx.draw_networkx_edges(G,pos,alpha=0.4) nx.draw_networkx_nodes(G,pos, node_size=80, cmap=plt.cm.Reds_r) #nx.draw(G, #pos, #node_size=40, ##node_color=c, #vmin=0.0, #vmax=1.0,", "pos=nx.get_node_attributes(G,'pos') nx.draw_networkx_edges(G,pos,alpha=0.4) nx.draw_networkx_nodes(G,pos, node_size=80, cmap=plt.cm.Reds_r) #nx.draw(G, #pos, #node_size=40, ##node_color=c, #vmin=0.0, #vmax=1.0, #with_labels=False #)", "PyGraphviz or Pydot\") import matplotlib.pyplot as plt plt.figure(1, figsize=(8,8)) # 
layout graphs with", "transport): sender = self.peer recipients = set() if not sender.friends: log.debug(\"{} has no", "offline posts # so that connection behaviour and a sudden egress of messages", "= {} for n in G.nodes(): peer_by_id[n] = SimulatedPeer(Peer(random_address(n))) for e in G.edges():", "log.debug(\"{} will make a post.\".format(self.peer)) self.random_post(transport) elif a == 'disconnect': log.info(\"{} disconnecting\".format(self.peer)) self.peer.connected", "elif a == 'disconnect': log.info(\"{} disconnecting\".format(self.peer)) self.peer.connected = False elif a == 'connect':", "that connection behaviour and a sudden egress of messages # doesn't mess things", "p2)) def gen_social_graph_1(num_people=10): G=nx.Graph() peers = [x for x in generate_random_peers(num_people)] [log.debug(x) for", "in range(0, links): p1 = random.choice(peers) p2 = None while p2 is None", "no friends :-(\".format(sender)) return num_recipients = random.randint(1, len(list(sender.friends.values()))) while len(recipients) < num_recipients: r", "plt plt.figure(1, figsize=(8,8)) # layout graphs with positions using graphviz neato #pos=nx.graphviz_layout(G, prog=\"neato\")", "set() if not sender.friends: log.debug(\"{} has no friends :-(\".format(sender)) return num_recipients = random.randint(1,", "for n in G.nodes(): peer_by_id[n] = SimulatedPeer(Peer(random_address(n))) for e in G.edges(): p1 =", "{} are now friends'.format(p1, p2)) def gen_social_graph_1(num_people=10): G=nx.Graph() peers = [x for x", "'Boris', 'Carrie', 'Daniel', 'Ezekiel', 'Fiona', 'Harold', 'Indiana'] hosts = ['example.com', 'gmail.com', 'robot.com', 'zombo.com',", "'@' + random.choice(hosts) def generate_random_peers(number=100): for i in range(0, number): p = Peer(random_address(i))", "send a message.\".format(self.peer)) self.random_message(transport) elif a == 'friend_post': log.debug(\"{} will make a post.\".format(self.peer))", "= random.choice(list(sender.friends.values())) recipients.add(r) 
msg = Message(sender.address, [r.address for r in recipients], MessageTypes.SHARE, text=\"This", "transport) def random_address(i): names = ['Ariel', 'Boris', 'Carrie', 'Daniel', 'Ezekiel', 'Fiona', 'Harold', 'Indiana']", "== 'send_msg': log.debug(\"{} will send a message.\".format(self.peer)) self.random_message(transport) elif a == 'friend_post': log.debug(\"{}", "'robot.com', 'zombo.com', 'yahoo.com', 'geocities.com'] return random.choice(names) + '_' + str(i) + '@' +", "4), ('send_msg', 4)] while True: available_actions = list(actions) if self.peer.connected: available_actions.append(('disconnect', 2)) else:", "= ['Ariel', 'Boris', 'Carrie', 'Daniel', 'Ezekiel', 'Fiona', 'Harold', 'Indiana'] hosts = ['example.com', 'gmail.com',", "if not sender.friends: log.debug(\"{} has no friends :-(\".format(sender)) return num_recipients = random.randint(1, len(list(sender.friends.values())))", "= peer_by_id[e[0]] p2 = peer_by_id[e[1]] p1.peer.friends[p2.peer.address] = Friend(p2.peer.address, p2.peer.key) p2.peer.friends[p1.peer.address] = Friend(p1.peer.address, p1.peer.key)", "def random_message(self, transport): sender = self.peer recipient = None if not sender.friends: log.debug(\"{}", "str(i) + '@' + random.choice(hosts) def generate_random_peers(number=100): for i in range(0, number): p", "peer self.connected = True def simulate(self, transport, env): actions = [('friend_post', 4), ('send_msg',", "i in range(0, number): p = Peer(random_address(i)) yield SimulatedPeer(p) def random_friendships(peers, G=None, density=0.1):", "range(0, links): p1 = random.choice(peers) p2 = None while p2 is None or", "# doesn't mess things up available_actions = [('connect', 1), ('none', 3)] a =", "env.timeout(wait_duration) def random_post(self, transport): sender = self.peer recipients = set() if not sender.friends:", "= self.peer recipients = set() if not sender.friends: log.debug(\"{} has no friends :-(\".format(sender))", "for p in peers: G.add_node(p.address) 
random_friendships([p.peer for p in peers], G) return (peers,", "weighted_choice log = logging.getLogger(__name__) class SimulatedPeer(object): def __init__(self, peer): self.peer = peer self.connected", "= True self.peer.on_connect(transport) wait_duration = random.randint(1,4) yield env.timeout(wait_duration) def random_post(self, transport): sender =", "or Pydot\") import matplotlib.pyplot as plt plt.figure(1, figsize=(8,8)) # layout graphs with positions", "import ( Message, DictTransport, MessageSerializer, MessageTypes ) from tenet.peer import Peer, Friend from", "= random.choice(peers) G.add_edge(p1.address, p2.address) # TODO exchange keys too p1.friends[p2.address] = Friend(p2.address, p2.key)", "< num_recipients: r = random.choice(list(sender.friends.values())) recipients.add(r) msg = Message(sender.address, [r.address for r in", "= Message(sender.address, [r.address for r in recipients], MessageTypes.SHARE, text=\"This is a general post", ") from tenet.peer import Peer, Friend from tenet.utils import weighted_choice log = logging.getLogger(__name__)", "tenet.peer import Peer, Friend from tenet.utils import weighted_choice log = logging.getLogger(__name__) class SimulatedPeer(object):", "p2 = random.choice(peers) G.add_edge(p1.address, p2.address) # TODO exchange keys too p1.friends[p2.address] = Friend(p2.address,", "in peers: G.add_node(p.address) random_friendships([p.peer for p in peers], G) return (peers, G) def", "neato #pos=nx.graphviz_layout(G, prog=\"neato\") pos=nx.get_node_attributes(G,'pos') nx.draw_networkx_edges(G,pos,alpha=0.4) nx.draw_networkx_nodes(G,pos, node_size=80, cmap=plt.cm.Reds_r) #nx.draw(G, #pos, #node_size=40, ##node_color=c, #vmin=0.0,", "# NOTE: maybe simulate offline posts # so that connection behaviour and a", "Graphviz and either PyGraphviz or Pydot\") import matplotlib.pyplot as plt plt.figure(1, figsize=(8,8)) #", "if not sender.friends: log.debug(\"{} has no friends :-(\".format(sender)) return while recipient is None", 
"actions = [('friend_post', 4), ('send_msg', 4)] while True: available_actions = list(actions) if self.peer.connected:", "'Ezekiel', 'Fiona', 'Harold', 'Indiana'] hosts = ['example.com', 'gmail.com', 'robot.com', 'zombo.com', 'yahoo.com', 'geocities.com'] return", "TODO exchange keys too p1.friends[p2.address] = Friend(p2.address, p2.key) p2.friends[p1.address] = Friend(p1.address, p1.key) #log.debug('{}", "except ImportError: raise ImportError(\"This example needs Graphviz and either PyGraphviz or Pydot\") import", "peers: G.add_node(p.address) random_friendships([p.peer for p in peers], G) return (peers, G) def gen_social_graph_2(num_people=10):", "e in G.edges(): p1 = peer_by_id[e[0]] p2 = peer_by_id[e[1]] p1.peer.friends[p2.peer.address] = Friend(p2.peer.address, p2.peer.key)", "mah friends!\") sender.send(msg, transport) def random_message(self, transport): sender = self.peer recipient = None", "# so that connection behaviour and a sudden egress of messages # doesn't", "connection behaviour and a sudden egress of messages # doesn't mess things up", "= self.peer recipient = None if not sender.friends: log.debug(\"{} has no friends :-(\".format(sender))", "None if not sender.friends: log.debug(\"{} has no friends :-(\".format(sender)) return while recipient is", "return (peers, G) def gen_social_graph_2(num_people=10): G=nx.random_geometric_graph(num_people,0.325) peer_by_id = {} for n in G.nodes():", "generate_random_peers(number=100): for i in range(0, number): p = Peer(random_address(i)) yield SimulatedPeer(p) def random_friendships(peers,", "keys too p1.friends[p2.address] = Friend(p2.address, p2.key) p2.friends[p1.address] = Friend(p1.address, p1.key) #log.debug('{} and {}", "G.add_node(p.address) random_friendships([p.peer for p in peers], G) return (peers, G) def gen_social_graph_2(num_people=10): G=nx.random_geometric_graph(num_people,0.325)", "= True def simulate(self, transport, env): actions = [('friend_post', 4), ('send_msg', 4)] while", "= [x for x 
in generate_random_peers(num_people)] [log.debug(x) for x in peers] for p", "x in peers] for p in peers: G.add_node(p.address) random_friendships([p.peer for p in peers],", "links): p1 = random.choice(peers) p2 = None while p2 is None or p1", "peer_by_id[e[0]] p2 = peer_by_id[e[1]] p1.peer.friends[p2.peer.address] = Friend(p2.peer.address, p2.peer.key) p2.peer.friends[p1.peer.address] = Friend(p1.peer.address, p1.peer.key) return", "names = ['Ariel', 'Boris', 'Carrie', 'Daniel', 'Ezekiel', 'Fiona', 'Harold', 'Indiana'] hosts = ['example.com',", "= list(actions) if self.peer.connected: available_actions.append(('disconnect', 2)) else: # NOTE: maybe simulate offline posts", "recipients], MessageTypes.SHARE, text=\"This is a general post to mah friends!\") sender.send(msg, transport) def", "random.choice(hosts) def generate_random_peers(number=100): for i in range(0, number): p = Peer(random_address(i)) yield SimulatedPeer(p)", "= Peer(random_address(i)) yield SimulatedPeer(p) def random_friendships(peers, G=None, density=0.1): x = len(peers) links =", "= random.randint(1,4) yield env.timeout(wait_duration) def random_post(self, transport): sender = self.peer recipients = set()", "no friends :-(\".format(sender)) return while recipient is None or recipient == sender: recipient", "not sender.friends: log.debug(\"{} has no friends :-(\".format(sender)) return num_recipients = random.randint(1, len(list(sender.friends.values()))) while", "def generate_random_peers(number=100): for i in range(0, number): p = Peer(random_address(i)) yield SimulatedPeer(p) def", "p2.friends[p1.address] = Friend(p1.address, p1.key) #log.debug('{} and {} are now friends'.format(p1, p2)) def gen_social_graph_1(num_people=10):", "for e in G.edges(): p1 = peer_by_id[e[0]] p2 = peer_by_id[e[1]] p1.peer.friends[p2.peer.address] = Friend(p2.peer.address,", "will make a post.\".format(self.peer)) self.random_post(transport) elif a == 'disconnect': log.info(\"{} disconnecting\".format(self.peer)) 
self.peer.connected =", "graphs with positions using graphviz neato #pos=nx.graphviz_layout(G, prog=\"neato\") pos=nx.get_node_attributes(G,'pos') nx.draw_networkx_edges(G,pos,alpha=0.4) nx.draw_networkx_nodes(G,pos, node_size=80, cmap=plt.cm.Reds_r)", "random.choice(names) + '_' + str(i) + '@' + random.choice(hosts) def generate_random_peers(number=100): for i", "len(list(sender.friends.values()))) while len(recipients) < num_recipients: r = random.choice(list(sender.friends.values())) recipients.add(r) msg = Message(sender.address, [r.address", "nx from tenet.message import ( Message, DictTransport, MessageSerializer, MessageTypes ) from tenet.peer import", "4)] while True: available_actions = list(actions) if self.peer.connected: available_actions.append(('disconnect', 2)) else: # NOTE:", "are now friends'.format(p1, p2)) def gen_social_graph_1(num_people=10): G=nx.Graph() peers = [x for x in", "while recipient is None or recipient == sender: recipient = random.choice(list(sender.friends.values())) msg =", "== 'connect': log.info(\"{} reconnecting\".format(self.peer)) self.peer.connected = True self.peer.on_connect(transport) wait_duration = random.randint(1,4) yield env.timeout(wait_duration)", "random_friendships([p.peer for p in peers], G) return (peers, G) def gen_social_graph_2(num_people=10): G=nx.random_geometric_graph(num_people,0.325) peer_by_id", "try: from networkx import graphviz_layout except ImportError: raise ImportError(\"This example needs Graphviz and", "= peer_by_id[e[1]] p1.peer.friends[p2.peer.address] = Friend(p2.peer.address, p2.peer.key) p2.peer.friends[p1.peer.address] = Friend(p1.peer.address, p1.peer.key) return peer_by_id.values(), G", "p in peers: G.add_node(p.address) random_friendships([p.peer for p in peers], G) return (peers, G)", "Message, DictTransport, MessageSerializer, MessageTypes ) from tenet.peer import Peer, Friend from tenet.utils import", "Peer, Friend from tenet.utils import weighted_choice log = 
logging.getLogger(__name__) class SimulatedPeer(object): def __init__(self,", "posts # so that connection behaviour and a sudden egress of messages #", "random.choice(list(sender.friends.values())) msg = Message(sender.address, [recipient.address], MessageTypes.MESSAGE, text=\"Hello {}!\".format(recipient)) sender.send(msg, transport) def random_address(i): names", "('send_msg', 4)] while True: available_actions = list(actions) if self.peer.connected: available_actions.append(('disconnect', 2)) else: #", "= logging.getLogger(__name__) class SimulatedPeer(object): def __init__(self, peer): self.peer = peer self.connected = True" ]
[ "class Result: def __init__(self, test_suites: List[TestSuite], duration: float = 0.0) -> None: self.test_suites", "= test_case_globs_dict[test_suite_path] if test_suite_path in ignored_test_case_globs_dict: test_suite_info.ignored_test_case_globs = ignored_test_case_globs_dict[ test_suite_path ] return result", "build_test_suite_info_dict( self, test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict, ) -> TestSuiteInfoDict: result: TestSuiteInfoDict = {}", "None: self._starknet_compiler = starknet_compiler supported_test_suite_filename_patterns = [ re.compile(r\"^test_.*\\.cairo\"), re.compile(r\"^.*_test.cairo\"), ] @classmethod def is_test_suite(cls,", "self.test_cases_count == 1: result.append(\"1 test case\") else: result.append(f\"{self.test_cases_count} test cases\") result.append(f\"({self.duration:.3f} s)\") logger.info(\"", "in cls.supported_test_suite_filename_patterns ) def collect( self, targets: List[Target], ignored_targets: Optional[List[Target]] = None, default_test_suite_glob:", "prevents changing lengths of this collection during loop execution for test_case_name in test_case_names:", "in test_case_globs_dict: test_suite_info = result.setdefault( test_suite_path, TestSuiteInfo( test_case_globs=set(), ignored_test_case_globs=set(), path=test_suite_path, ), ) test_suite_info.test_case_globs", "result def _find_test_suite_paths_from_glob( self, test_suite_glob: str ) -> Set[TestSuitePath]: results: Set[Path] = set()", "return self._starknet_compiler.preprocess_contract(file_path) except PreprocessorError as p_err: print(p_err) raise TestCollectingException(\"Failed to collect test cases\")", "= test_suite_glob or default_test_suite_glob or \".\" if not test_case_glob: test_case_glob = \"*\" return", "from starkware.starknet.compiler.starknet_preprocessor import ( StarknetPreprocessedProgram, ) from protostar.commands.test.test_suite import TestSuite from protostar.protostar_exception import", "Optional, Set from 
starkware.cairo.lang.compiler.preprocessor.preprocessor_error import ( PreprocessorError, ) from starkware.starknet.compiler.starknet_preprocessor import ( StarknetPreprocessedProgram,", "- start_time ) def build_test_case_globs_dict( self, parsed_targets: Set[ParsedTarget], ) -> TestCaseGlobsDict: results: TestCaseGlobsDict", "test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict, ) -> TestCaseGlobsDict: result = test_case_globs_dict.copy() for ignored_target_path in", "and TestCollector.is_test_suite(path.name): results.add(path) return results def _find_test_suite_paths_in_dir(self, path: Path) -> Set[TestSuitePath]: filepaths =", "-> TestSuiteInfoDict: result: TestSuiteInfoDict = {} for test_suite_path in test_case_globs_dict: test_suite_info = result.setdefault(", "import ( StarknetPreprocessedProgram, ) from protostar.commands.test.test_suite import TestSuite from protostar.protostar_exception import ProtostarException from", "List[str] = [\"Collected\"] suites_count = len(self.test_suites) if suites_count == 1: result.append(\"1 suite,\") else:", "if test_suite_path in ignored_test_case_globs_dict: test_suite_info.ignored_test_case_globs = ignored_test_case_globs_dict[ test_suite_path ] return result def _find_test_suite_paths_from_glob(", "List[str]: return self._starknet_compiler.get_function_names( preprocessed, predicate=lambda fn_name: fn_name.startswith(\"test_\") ) def _find_setup_hook_name( self, preprocessed: StarknetPreprocessedProgram", "= glob(test_suite_glob, recursive=True) for match in matches: path = Path(match) if path.is_dir(): results.update(self._find_test_suite_paths_in_dir(path))", ") -> List[TestSuite]: return [ self._build_test_suite_from_test_suite_info( test_suite_info, ) for test_suite_info in test_suite_info_dict.values() ]", "parsed_targets = self.parse_targets(set(targets), default_test_suite_glob) ignored_parsed_targets = self.parse_targets( set(ignored_targets or []), default_test_suite_glob 
) test_case_globs_dict", "for parsed_target in parsed_targets: test_suite_paths = self._find_test_suite_paths_from_glob( parsed_target.test_suite_glob ) for test_suite_path in test_suite_paths:", "for test_suite in test_suites] ) self.duration = duration def log(self, logger: Logger): if", "Set[TestSuitePath]: filepaths = set(glob(f\"{path}/**/*.cairo\", recursive=True)) results: Set[Path] = set() for filepath in filepaths:", "logger.info(\" \".join(result)) else: logger.warning(\"No cases found\") def __init__( self, starknet_compiler: StarknetCompiler, ) ->", "suites,\") result.append(\"and\") if self.test_cases_count == 1: result.append(\"1 test case\") else: result.append(f\"{self.test_cases_count} test cases\")", "ParsedTarget: test_suite_glob: TestSuiteGlob test_case_glob: TestCaseGlob @classmethod def from_target( cls, target: Target, default_test_suite_glob: Optional[TestSuiteGlob]", "ignored_test_case_glob in self.ignored_test_case_globs: if fnmatch(test_case_name, ignored_test_case_glob): result.remove(test_case_name) break return result TestSuiteInfoDict = Dict[TestSuitePath,", "test_suite_glob: str ) -> Set[TestSuitePath]: results: Set[Path] = set() matches = glob(test_suite_glob, recursive=True)", "collected_test_case_names ) return TestSuite( test_path=test_suite_info.path, test_case_names=matching_test_case_names, preprocessed_contract=preprocessed, setup_fn_name=self._find_setup_hook_name(preprocessed), ) def _collect_test_case_names( self, preprocessed:", "test_suite_glob = test_suite_glob or default_test_suite_glob or \".\" if not test_case_glob: test_case_glob = \"*\"", "Set[str] ) -> Set[str]: result = ( test_case_names.copy() ) # copy prevents changing", "Path from time import time from typing import Dict, List, Optional, Set from", "self, targets: Set[Target], default_test_suite_glob: Optional[str] = None ) -> Set[ParsedTarget]: return { ParsedTarget.from_target(target,", "else: logger.warning(\"No cases found\") def __init__( self, 
starknet_compiler: StarknetCompiler, ) -> None: self._starknet_compiler", "def _build_test_suite_from_test_suite_info( self, test_suite_info: TestSuiteInfo, ) -> TestSuite: preprocessed = self._preprocess_contract(test_suite_info.path) collected_test_case_names =", "match in matches: path = Path(match) if path.is_dir(): results.update(self._find_test_suite_paths_in_dir(path)) elif path.is_file() and TestCollector.is_test_suite(path.name):", "= self._find_matching_any_test_case_glob(test_case_names) result = self._filter_out_matching_any_ignored_test_case_glob(matches) return list(result) def _find_matching_any_test_case_glob(self, test_case_names: List[str]) -> Set[str]:", "test_suite_paths: results[test_suite_path].add(parsed_target.test_case_glob) return results def parse_targets( self, targets: Set[Target], default_test_suite_glob: Optional[str] = None", "dataclass from fnmatch import fnmatch from glob import glob from logging import Logger", "TestCaseGlobsDict, ) -> TestSuiteInfoDict: result: TestSuiteInfoDict = {} for test_suite_path in test_case_globs_dict: test_suite_info", "suites_count == 1: result.append(\"1 suite,\") else: result.append(f\"{suites_count} suites,\") result.append(\"and\") if self.test_cases_count == 1:", "test_suite_info = result.setdefault( test_suite_path, TestSuiteInfo( test_case_globs=set(), ignored_test_case_globs=set(), path=test_suite_path, ), ) test_suite_info.test_case_globs = test_case_globs_dict[test_suite_path]", "test_case_name in test_case_names: for test_case_glob in self.test_case_globs: if fnmatch(test_case_name, test_case_glob): result.add(test_case_name) return result", "TestCollector.is_test_suite(path.name): results.add(path) return results def _find_test_suite_paths_in_dir(self, path: Path) -> Set[TestSuitePath]: filepaths = set(glob(f\"{path}/**/*.cairo\",", "0.0) -> None: self.test_suites = test_suites self.test_cases_count = sum( [len(test_suite.test_case_names) for test_suite in", "if len(function_names) > 0 else 
None def _preprocess_contract(self, file_path: Path) -> StarknetPreprocessedProgram: try:", "non_empty_test_suites = list( filter(lambda test_file: (test_file.test_case_names) != [], test_suites) ) end_time = time()", "TestSuiteInfo( test_case_globs=set(), ignored_test_case_globs=set(), path=test_suite_path, ), ) test_suite_info.test_case_globs = test_case_globs_dict[test_suite_path] if test_suite_path in ignored_test_case_globs_dict:", "class ParsedTarget: test_suite_glob: TestSuiteGlob test_case_glob: TestCaseGlob @classmethod def from_target( cls, target: Target, default_test_suite_glob:", "duration def log(self, logger: Logger): if self.test_cases_count: result: List[str] = [\"Collected\"] suites_count =", "starknet_compiler: StarknetCompiler, ) -> None: self._starknet_compiler = starknet_compiler supported_test_suite_filename_patterns = [ re.compile(r\"^test_.*\\.cairo\"), re.compile(r\"^.*_test.cairo\"),", "= Path(filepath) if TestCollector.is_test_suite(path.name): results.add(path) return results def _build_test_suites_from_test_suite_info_dict( self, test_suite_info_dict: TestSuiteInfoDict, )", "= Path TestCaseGlob = str Target = str \"\"\"e.g. 
`tests/**/::test_*`\"\"\" TestCaseGlobsDict = Dict[TestSuitePath,", "import Dict, List, Optional, Set from starkware.cairo.lang.compiler.preprocessor.preprocessor_error import ( PreprocessorError, ) from starkware.starknet.compiler.starknet_preprocessor", "def _build_test_suites_from_test_suite_info_dict( self, test_suite_info_dict: TestSuiteInfoDict, ) -> List[TestSuite]: return [ self._build_test_suite_from_test_suite_info( test_suite_info, )", "PreprocessorError, ) from starkware.starknet.compiler.starknet_preprocessor import ( StarknetPreprocessedProgram, ) from protostar.commands.test.test_suite import TestSuite from", "from pathlib import Path from time import time from typing import Dict, List,", "TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict, ) -> TestCaseGlobsDict: result = test_case_globs_dict.copy() for ignored_target_path in ignored_test_case_globs_dict:", "for test_case_name in test_case_names: for test_case_glob in self.test_case_globs: if fnmatch(test_case_name, test_case_glob): result.add(test_case_name) return", "[], test_suites) ) end_time = time() return TestCollector.Result( test_suites=non_empty_test_suites, duration=end_time - start_time )", "test_suite_info.match_test_case_names( collected_test_case_names ) return TestSuite( test_path=test_suite_info.path, test_case_names=matching_test_case_names, preprocessed_contract=preprocessed, setup_fn_name=self._find_setup_hook_name(preprocessed), ) def _collect_test_case_names( self,", "TestSuiteInfo: path: Path test_case_globs: Set[TestCaseGlob] ignored_test_case_globs: Set[TestCaseGlob] def match_test_case_names(self, test_case_names: List[str]) -> List[str]:", "\"*\" in ignored_test_case_globs_dict[ignored_target_path] and ignored_target_path in result ): del result[ignored_target_path] return result def", "self.build_test_suite_info_dict( filtered_test_case_globs_dict, ignored_test_case_globs_dict, ) test_suites = self._build_test_suites_from_test_suite_info_dict( 
test_suite_info_dict ) non_empty_test_suites = list( filter(lambda", "set() for test_case_name in test_case_names: for test_case_glob in self.test_case_globs: if fnmatch(test_case_name, test_case_glob): result.add(test_case_name)", "matches = glob(test_suite_glob, recursive=True) for match in matches: path = Path(match) if path.is_dir():", "self._collect_test_case_names(preprocessed) matching_test_case_names = test_suite_info.match_test_case_names( collected_test_case_names ) return TestSuite( test_path=test_suite_info.path, test_case_names=matching_test_case_names, preprocessed_contract=preprocessed, setup_fn_name=self._find_setup_hook_name(preprocessed), )", "re.compile(r\"^.*_test.cairo\"), ] @classmethod def is_test_suite(cls, filename: str) -> bool: return any( test_re.match(filename) for", "def match_test_case_names(self, test_case_names: List[str]) -> List[str]: matches = self._find_matching_any_test_case_glob(test_case_names) result = self._filter_out_matching_any_ignored_test_case_glob(matches) return", "test_suites=non_empty_test_suites, duration=end_time - start_time ) def build_test_case_globs_dict( self, parsed_targets: Set[ParsedTarget], ) -> TestCaseGlobsDict:", "len(self.test_suites) if suites_count == 1: result.append(\"1 suite,\") else: result.append(f\"{suites_count} suites,\") result.append(\"and\") if self.test_cases_count", ") test_suites = self._build_test_suites_from_test_suite_info_dict( test_suite_info_dict ) non_empty_test_suites = list( filter(lambda test_file: (test_file.test_case_names) !=", "import defaultdict from dataclasses import dataclass from fnmatch import fnmatch from glob import", "re from collections import defaultdict from dataclasses import dataclass from fnmatch import fnmatch", "_build_test_suite_from_test_suite_info( self, test_suite_info: TestSuiteInfo, ) -> TestSuite: preprocessed = self._preprocess_contract(test_suite_info.path) collected_test_case_names = self._collect_test_case_names(preprocessed)", "None if \"::\" 
in target: (test_suite_glob, test_case_glob) = target.split(\"::\") test_suite_glob = test_suite_glob or", "-> List[str]: matches = self._find_matching_any_test_case_glob(test_case_names) result = self._filter_out_matching_any_ignored_test_case_glob(matches) return list(result) def _find_matching_any_test_case_glob(self, test_case_names:", "result.append(\"and\") if self.test_cases_count == 1: result.append(\"1 test case\") else: result.append(f\"{self.test_cases_count} test cases\") result.append(f\"({self.duration:.3f}", "ParsedTarget.from_target(target, default_test_suite_glob) for target in targets } def filter_out_ignored_test_suites( self, test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict:", "{ ParsedTarget.from_target(target, default_test_suite_glob) for target in targets } def filter_out_ignored_test_suites( self, test_case_globs_dict: TestCaseGlobsDict,", "in parsed_targets: test_suite_paths = self._find_test_suite_paths_from_glob( parsed_target.test_suite_glob ) for test_suite_path in test_suite_paths: results[test_suite_path].add(parsed_target.test_case_glob) return", "!= [], test_suites) ) end_time = time() return TestCollector.Result( test_suites=non_empty_test_suites, duration=end_time - start_time", "result ): del result[ignored_target_path] return result def build_test_suite_info_dict( self, test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict,", "TestCaseGlobsDict = Dict[TestSuitePath, Set[TestCaseGlob]] @dataclass(frozen=True) class ParsedTarget: test_suite_glob: TestSuiteGlob test_case_glob: TestCaseGlob @classmethod def", "ignored_test_case_globs=set(), path=test_suite_path, ), ) test_suite_info.test_case_globs = test_case_globs_dict[test_suite_path] if test_suite_path in ignored_test_case_globs_dict: test_suite_info.ignored_test_case_globs =", "Set[str] = set() for test_case_name in test_case_names: for test_case_glob in self.test_case_globs: if fnmatch(test_case_name,", "if 
fnmatch(test_case_name, test_case_glob): result.add(test_case_name) return result def _filter_out_matching_any_ignored_test_case_glob( self, test_case_names: Set[str] ) ->", "result def build_test_suite_info_dict( self, test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict, ) -> TestSuiteInfoDict: result: TestSuiteInfoDict", "target in targets } def filter_out_ignored_test_suites( self, test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict, ) ->", "result.append(f\"{self.test_cases_count} test cases\") result.append(f\"({self.duration:.3f} s)\") logger.info(\" \".join(result)) else: logger.warning(\"No cases found\") def __init__(", "TestSuiteGlob test_case_glob: TestCaseGlob @classmethod def from_target( cls, target: Target, default_test_suite_glob: Optional[TestSuiteGlob] ): test_suite_glob:", "filename: str) -> bool: return any( test_re.match(filename) for test_re in cls.supported_test_suite_filename_patterns ) def", "= \"*\" return cls(test_suite_glob, test_case_glob) @dataclass class TestSuiteInfo: path: Path test_case_globs: Set[TestCaseGlob] ignored_test_case_globs:", "= defaultdict(set) for parsed_target in parsed_targets: test_suite_paths = self._find_test_suite_paths_from_glob( parsed_target.test_suite_glob ) for test_suite_path", "return TestSuite( test_path=test_suite_info.path, test_case_names=matching_test_case_names, preprocessed_contract=preprocessed, setup_fn_name=self._find_setup_hook_name(preprocessed), ) def _collect_test_case_names( self, preprocessed: StarknetPreprocessedProgram )", "class TestCollectingException(ProtostarException): pass @dataclass class TestCollector: class Result: def __init__(self, test_suites: List[TestSuite], duration:", "return result def build_test_suite_info_dict( self, test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict, ) -> TestSuiteInfoDict: result:", "= Path(match) if path.is_dir(): 
results.update(self._find_test_suite_paths_in_dir(path)) elif path.is_file() and TestCollector.is_test_suite(path.name): results.add(path) return results def", "StarknetPreprocessedProgram ) -> Optional[str]: function_names = self._starknet_compiler.get_function_names( preprocessed, predicate=lambda fn_name: fn_name == \"__setup__\"", "0 else None def _preprocess_contract(self, file_path: Path) -> StarknetPreprocessedProgram: try: return self._starknet_compiler.preprocess_contract(file_path) except", "test_suite_info_dict.values() ] def _build_test_suite_from_test_suite_info( self, test_suite_info: TestSuiteInfo, ) -> TestSuite: preprocessed = self._preprocess_contract(test_suite_info.path)", "fnmatch from glob import glob from logging import Logger from pathlib import Path", "disable=no-self-use import re from collections import defaultdict from dataclasses import dataclass from fnmatch", "filtered_test_case_globs_dict = self.filter_out_ignored_test_suites( test_case_globs_dict, ignored_test_case_globs_dict, ) test_suite_info_dict = self.build_test_suite_info_dict( filtered_test_case_globs_dict, ignored_test_case_globs_dict, ) test_suites", "_filter_out_matching_any_ignored_test_case_glob( self, test_case_names: Set[str] ) -> Set[str]: result = ( test_case_names.copy() ) #", ") -> None: self._starknet_compiler = starknet_compiler supported_test_suite_filename_patterns = [ re.compile(r\"^test_.*\\.cairo\"), re.compile(r\"^.*_test.cairo\"), ] @classmethod", "results def parse_targets( self, targets: Set[Target], default_test_suite_glob: Optional[str] = None ) -> Set[ParsedTarget]:", "self, test_case_names: Set[str] ) -> Set[str]: result = ( test_case_names.copy() ) # copy", "targets: List[Target], ignored_targets: Optional[List[Target]] = None, default_test_suite_glob: Optional[str] = None, ) -> \"TestCollector.Result\":", "= time() return TestCollector.Result( test_suites=non_empty_test_suites, duration=end_time - start_time ) def build_test_case_globs_dict( self, 
parsed_targets:", "from protostar.commands.test.test_suite import TestSuite from protostar.protostar_exception import ProtostarException from protostar.utils.starknet_compilation import StarknetCompiler TestSuiteGlob", "in matches: path = Path(match) if path.is_dir(): results.update(self._find_test_suite_paths_in_dir(path)) elif path.is_file() and TestCollector.is_test_suite(path.name): results.add(path)", "{} for test_suite_path in test_case_globs_dict: test_suite_info = result.setdefault( test_suite_path, TestSuiteInfo( test_case_globs=set(), ignored_test_case_globs=set(), path=test_suite_path,", "if not test_case_glob: test_case_glob = \"*\" return cls(test_suite_glob, test_case_glob) @dataclass class TestSuiteInfo: path:", "function_names = self._starknet_compiler.get_function_names( preprocessed, predicate=lambda fn_name: fn_name == \"__setup__\" ) return function_names[0] if", "from protostar.utils.starknet_compilation import StarknetCompiler TestSuiteGlob = str TestSuitePath = Path TestCaseGlob = str", "return [ self._build_test_suite_from_test_suite_info( test_suite_info, ) for test_suite_info in test_suite_info_dict.values() ] def _build_test_suite_from_test_suite_info( self,", "-> Set[ParsedTarget]: return { ParsedTarget.from_target(target, default_test_suite_glob) for target in targets } def filter_out_ignored_test_suites(", "1: result.append(\"1 test case\") else: result.append(f\"{self.test_cases_count} test cases\") result.append(f\"({self.duration:.3f} s)\") logger.info(\" \".join(result)) else:", "\"__setup__\" ) return function_names[0] if len(function_names) > 0 else None def _preprocess_contract(self, file_path:", "( \"*\" in ignored_test_case_globs_dict[ignored_target_path] and ignored_target_path in result ): del result[ignored_target_path] return result", "_find_setup_hook_name( self, preprocessed: StarknetPreprocessedProgram ) -> Optional[str]: function_names = self._starknet_compiler.get_function_names( preprocessed, predicate=lambda 
fn_name:", "result.append(f\"({self.duration:.3f} s)\") logger.info(\" \".join(result)) else: logger.warning(\"No cases found\") def __init__( self, starknet_compiler: StarknetCompiler,", "return cls(test_suite_glob, test_case_glob) @dataclass class TestSuiteInfo: path: Path test_case_globs: Set[TestCaseGlob] ignored_test_case_globs: Set[TestCaseGlob] def", "copy prevents changing lengths of this collection during loop execution for test_case_name in", "results def _find_test_suite_paths_in_dir(self, path: Path) -> Set[TestSuitePath]: filepaths = set(glob(f\"{path}/**/*.cairo\", recursive=True)) results: Set[Path]", "pathlib import Path from time import time from typing import Dict, List, Optional,", "fnmatch(test_case_name, ignored_test_case_glob): result.remove(test_case_name) break return result TestSuiteInfoDict = Dict[TestSuitePath, TestSuiteInfo] class TestCollectingException(ProtostarException): pass", ") test_suite_info.test_case_globs = test_case_globs_dict[test_suite_path] if test_suite_path in ignored_test_case_globs_dict: test_suite_info.ignored_test_case_globs = ignored_test_case_globs_dict[ test_suite_path ]", "parsed_target.test_suite_glob ) for test_suite_path in test_suite_paths: results[test_suite_path].add(parsed_target.test_case_glob) return results def parse_targets( self, targets:", "( StarknetPreprocessedProgram, ) from protostar.commands.test.test_suite import TestSuite from protostar.protostar_exception import ProtostarException from protostar.utils.starknet_compilation", "def build_test_case_globs_dict( self, parsed_targets: Set[ParsedTarget], ) -> TestCaseGlobsDict: results: TestCaseGlobsDict = defaultdict(set) for", "supported_test_suite_filename_patterns = [ re.compile(r\"^test_.*\\.cairo\"), re.compile(r\"^.*_test.cairo\"), ] @classmethod def is_test_suite(cls, filename: str) -> bool:", "path: Path) -> Set[TestSuitePath]: filepaths = set(glob(f\"{path}/**/*.cairo\", recursive=True)) results: Set[Path] = set() for", "None: 
self.test_suites = test_suites self.test_cases_count = sum( [len(test_suite.test_case_names) for test_suite in test_suites] )", "test_suite_info.ignored_test_case_globs = ignored_test_case_globs_dict[ test_suite_path ] return result def _find_test_suite_paths_from_glob( self, test_suite_glob: str )", "Result: def __init__(self, test_suites: List[TestSuite], duration: float = 0.0) -> None: self.test_suites =", "-> List[str]: return self._starknet_compiler.get_function_names( preprocessed, predicate=lambda fn_name: fn_name.startswith(\"test_\") ) def _find_setup_hook_name( self, preprocessed:", "Set[TestCaseGlob]] @dataclass(frozen=True) class ParsedTarget: test_suite_glob: TestSuiteGlob test_case_glob: TestCaseGlob @classmethod def from_target( cls, target:", ") from protostar.commands.test.test_suite import TestSuite from protostar.protostar_exception import ProtostarException from protostar.utils.starknet_compilation import StarknetCompiler", "def log(self, logger: Logger): if self.test_cases_count: result: List[str] = [\"Collected\"] suites_count = len(self.test_suites)", "_preprocess_contract(self, file_path: Path) -> StarknetPreprocessedProgram: try: return self._starknet_compiler.preprocess_contract(file_path) except PreprocessorError as p_err: print(p_err)", "self, test_suite_info: TestSuiteInfo, ) -> TestSuite: preprocessed = self._preprocess_contract(test_suite_info.path) collected_test_case_names = self._collect_test_case_names(preprocessed) matching_test_case_names", "= self.parse_targets( set(ignored_targets or []), default_test_suite_glob ) test_case_globs_dict = self.build_test_case_globs_dict(parsed_targets) ignored_test_case_globs_dict = self.build_test_case_globs_dict(", "for filepath in filepaths: path = Path(filepath) if TestCollector.is_test_suite(path.name): results.add(path) return results def", "or []), default_test_suite_glob ) test_case_globs_dict = self.build_test_case_globs_dict(parsed_targets) ignored_test_case_globs_dict = 
self.build_test_case_globs_dict( ignored_parsed_targets ) filtered_test_case_globs_dict", "in targets } def filter_out_ignored_test_suites( self, test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict, ) -> TestCaseGlobsDict:", "StarknetCompiler TestSuiteGlob = str TestSuitePath = Path TestCaseGlob = str Target = str", "= self._build_test_suites_from_test_suite_info_dict( test_suite_info_dict ) non_empty_test_suites = list( filter(lambda test_file: (test_file.test_case_names) != [], test_suites)", "during loop execution for test_case_name in test_case_names: for ignored_test_case_glob in self.ignored_test_case_globs: if fnmatch(test_case_name,", ") -> Optional[str]: function_names = self._starknet_compiler.get_function_names( preprocessed, predicate=lambda fn_name: fn_name == \"__setup__\" )", "TestCaseGlob @classmethod def from_target( cls, target: Target, default_test_suite_glob: Optional[TestSuiteGlob] ): test_suite_glob: Optional[TestSuiteGlob] =", "if TestCollector.is_test_suite(path.name): results.add(path) return results def _build_test_suites_from_test_suite_info_dict( self, test_suite_info_dict: TestSuiteInfoDict, ) -> List[TestSuite]:", "fn_name == \"__setup__\" ) return function_names[0] if len(function_names) > 0 else None def", "import StarknetCompiler TestSuiteGlob = str TestSuitePath = Path TestCaseGlob = str Target =", "float = 0.0) -> None: self.test_suites = test_suites self.test_cases_count = sum( [len(test_suite.test_case_names) for", "self._build_test_suites_from_test_suite_info_dict( test_suite_info_dict ) non_empty_test_suites = list( filter(lambda test_file: (test_file.test_case_names) != [], test_suites) )", "if \"::\" in target: (test_suite_glob, test_case_glob) = target.split(\"::\") test_suite_glob = test_suite_glob or default_test_suite_glob", "List[TestSuite], duration: float = 0.0) -> None: self.test_suites = test_suites self.test_cases_count = sum(", "_find_test_suite_paths_from_glob( self, 
test_suite_glob: str ) -> Set[TestSuitePath]: results: Set[Path] = set() matches =", "predicate=lambda fn_name: fn_name.startswith(\"test_\") ) def _find_setup_hook_name( self, preprocessed: StarknetPreprocessedProgram ) -> Optional[str]: function_names", "test_case_names.copy() ) # copy prevents changing lengths of this collection during loop execution", "default_test_suite_glob: Optional[str] = None, ) -> \"TestCollector.Result\": start_time = time() parsed_targets = self.parse_targets(set(targets),", ") return function_names[0] if len(function_names) > 0 else None def _preprocess_contract(self, file_path: Path)", "= None, ) -> \"TestCollector.Result\": start_time = time() parsed_targets = self.parse_targets(set(targets), default_test_suite_glob) ignored_parsed_targets", "import re from collections import defaultdict from dataclasses import dataclass from fnmatch import", "start_time ) def build_test_case_globs_dict( self, parsed_targets: Set[ParsedTarget], ) -> TestCaseGlobsDict: results: TestCaseGlobsDict =", ") def _find_setup_hook_name( self, preprocessed: StarknetPreprocessedProgram ) -> Optional[str]: function_names = self._starknet_compiler.get_function_names( preprocessed,", "> 0 else None def _preprocess_contract(self, file_path: Path) -> StarknetPreprocessedProgram: try: return self._starknet_compiler.preprocess_contract(file_path)", "result.append(\"1 suite,\") else: result.append(f\"{suites_count} suites,\") result.append(\"and\") if self.test_cases_count == 1: result.append(\"1 test case\")", "from fnmatch import fnmatch from glob import glob from logging import Logger from", "( PreprocessorError, ) from starkware.starknet.compiler.starknet_preprocessor import ( StarknetPreprocessedProgram, ) from protostar.commands.test.test_suite import TestSuite", "test_suite_info: TestSuiteInfo, ) -> TestSuite: preprocessed = self._preprocess_contract(test_suite_info.path) collected_test_case_names = self._collect_test_case_names(preprocessed) 
matching_test_case_names =", "defaultdict from dataclasses import dataclass from fnmatch import fnmatch from glob import glob", "result: TestSuiteInfoDict = {} for test_suite_path in test_case_globs_dict: test_suite_info = result.setdefault( test_suite_path, TestSuiteInfo(", "import TestSuite from protostar.protostar_exception import ProtostarException from protostar.utils.starknet_compilation import StarknetCompiler TestSuiteGlob = str", "set() for filepath in filepaths: path = Path(filepath) if TestCollector.is_test_suite(path.name): results.add(path) return results", "ignored_parsed_targets ) filtered_test_case_globs_dict = self.filter_out_ignored_test_suites( test_case_globs_dict, ignored_test_case_globs_dict, ) test_suite_info_dict = self.build_test_suite_info_dict( filtered_test_case_globs_dict, ignored_test_case_globs_dict,", "default_test_suite_glob: Optional[TestSuiteGlob] ): test_suite_glob: Optional[TestSuiteGlob] = target test_case_glob: Optional[TestCaseGlob] = None if \"::\"", "preprocessed, predicate=lambda fn_name: fn_name == \"__setup__\" ) return function_names[0] if len(function_names) > 0", "from time import time from typing import Dict, List, Optional, Set from starkware.cairo.lang.compiler.preprocessor.preprocessor_error", "fnmatch(test_case_name, test_case_glob): result.add(test_case_name) return result def _filter_out_matching_any_ignored_test_case_glob( self, test_case_names: Set[str] ) -> Set[str]:", "suites_count = len(self.test_suites) if suites_count == 1: result.append(\"1 suite,\") else: result.append(f\"{suites_count} suites,\") result.append(\"and\")", ") def _collect_test_case_names( self, preprocessed: StarknetPreprocessedProgram ) -> List[str]: return self._starknet_compiler.get_function_names( preprocessed, predicate=lambda", "@dataclass class TestSuiteInfo: path: Path test_case_globs: Set[TestCaseGlob] ignored_test_case_globs: Set[TestCaseGlob] def match_test_case_names(self, test_case_names: List[str])", "self, preprocessed: 
StarknetPreprocessedProgram ) -> List[str]: return self._starknet_compiler.get_function_names( preprocessed, predicate=lambda fn_name: fn_name.startswith(\"test_\") )", "if self.test_cases_count: result: List[str] = [\"Collected\"] suites_count = len(self.test_suites) if suites_count == 1:", "in target: (test_suite_glob, test_case_glob) = target.split(\"::\") test_suite_glob = test_suite_glob or default_test_suite_glob or \".\"", "result = ( test_case_names.copy() ) # copy prevents changing lengths of this collection", "loop execution for test_case_name in test_case_names: for ignored_test_case_glob in self.ignored_test_case_globs: if fnmatch(test_case_name, ignored_test_case_glob):", "predicate=lambda fn_name: fn_name == \"__setup__\" ) return function_names[0] if len(function_names) > 0 else", "= str TestSuitePath = Path TestCaseGlob = str Target = str \"\"\"e.g. `tests/**/::test_*`\"\"\"", "setup_fn_name=self._find_setup_hook_name(preprocessed), ) def _collect_test_case_names( self, preprocessed: StarknetPreprocessedProgram ) -> List[str]: return self._starknet_compiler.get_function_names( preprocessed,", "-> bool: return any( test_re.match(filename) for test_re in cls.supported_test_suite_filename_patterns ) def collect( self,", "ignored_test_case_globs_dict: TestCaseGlobsDict, ) -> TestSuiteInfoDict: result: TestSuiteInfoDict = {} for test_suite_path in test_case_globs_dict:", "self._starknet_compiler.preprocess_contract(file_path) except PreprocessorError as p_err: print(p_err) raise TestCollectingException(\"Failed to collect test cases\") from", ") -> TestSuite: preprocessed = self._preprocess_contract(test_suite_info.path) collected_test_case_names = self._collect_test_case_names(preprocessed) matching_test_case_names = test_suite_info.match_test_case_names( collected_test_case_names", "\".\" if not test_case_glob: test_case_glob = \"*\" return cls(test_suite_glob, test_case_glob) @dataclass class TestSuiteInfo:", "glob from logging import Logger from 
pathlib import Path from time import time", "import Logger from pathlib import Path from time import time from typing import", "test_case_glob) = target.split(\"::\") test_suite_glob = test_suite_glob or default_test_suite_glob or \".\" if not test_case_glob:", "== 1: result.append(\"1 test case\") else: result.append(f\"{self.test_cases_count} test cases\") result.append(f\"({self.duration:.3f} s)\") logger.info(\" \".join(result))", "test_suites] ) self.duration = duration def log(self, logger: Logger): if self.test_cases_count: result: List[str]", "for test_re in cls.supported_test_suite_filename_patterns ) def collect( self, targets: List[Target], ignored_targets: Optional[List[Target]] =", "time() return TestCollector.Result( test_suites=non_empty_test_suites, duration=end_time - start_time ) def build_test_case_globs_dict( self, parsed_targets: Set[ParsedTarget],", "if ( \"*\" in ignored_test_case_globs_dict[ignored_target_path] and ignored_target_path in result ): del result[ignored_target_path] return", "Path(match) if path.is_dir(): results.update(self._find_test_suite_paths_in_dir(path)) elif path.is_file() and TestCollector.is_test_suite(path.name): results.add(path) return results def _find_test_suite_paths_in_dir(self,", "Optional[TestCaseGlob] = None if \"::\" in target: (test_suite_glob, test_case_glob) = target.split(\"::\") test_suite_glob =", "= set() for filepath in filepaths: path = Path(filepath) if TestCollector.is_test_suite(path.name): results.add(path) return", "set(glob(f\"{path}/**/*.cairo\", recursive=True)) results: Set[Path] = set() for filepath in filepaths: path = Path(filepath)", "test_case_globs_dict: test_suite_info = result.setdefault( test_suite_path, TestSuiteInfo( test_case_globs=set(), ignored_test_case_globs=set(), path=test_suite_path, ), ) test_suite_info.test_case_globs =", "str ) -> Set[TestSuitePath]: results: Set[Path] = set() matches = glob(test_suite_glob, recursive=True) for", "= set() for test_case_name in 
test_case_names: for test_case_glob in self.test_case_globs: if fnmatch(test_case_name, test_case_glob):", "test_file: (test_file.test_case_names) != [], test_suites) ) end_time = time() return TestCollector.Result( test_suites=non_empty_test_suites, duration=end_time", "if self.test_cases_count == 1: result.append(\"1 test case\") else: result.append(f\"{self.test_cases_count} test cases\") result.append(f\"({self.duration:.3f} s)\")", "test_suites self.test_cases_count = sum( [len(test_suite.test_case_names) for test_suite in test_suites] ) self.duration = duration", "[\"Collected\"] suites_count = len(self.test_suites) if suites_count == 1: result.append(\"1 suite,\") else: result.append(f\"{suites_count} suites,\")", "self._starknet_compiler.get_function_names( preprocessed, predicate=lambda fn_name: fn_name.startswith(\"test_\") ) def _find_setup_hook_name( self, preprocessed: StarknetPreprocessedProgram ) ->", "= set() matches = glob(test_suite_glob, recursive=True) for match in matches: path = Path(match)", "path.is_file() and TestCollector.is_test_suite(path.name): results.add(path) return results def _find_test_suite_paths_in_dir(self, path: Path) -> Set[TestSuitePath]: filepaths", "= [ re.compile(r\"^test_.*\\.cairo\"), re.compile(r\"^.*_test.cairo\"), ] @classmethod def is_test_suite(cls, filename: str) -> bool: return", "self, test_suite_info_dict: TestSuiteInfoDict, ) -> List[TestSuite]: return [ self._build_test_suite_from_test_suite_info( test_suite_info, ) for test_suite_info", "TestCaseGlobsDict = defaultdict(set) for parsed_target in parsed_targets: test_suite_paths = self._find_test_suite_paths_from_glob( parsed_target.test_suite_glob ) for", "def collect( self, targets: List[Target], ignored_targets: Optional[List[Target]] = None, default_test_suite_glob: Optional[str] = None,", "result def _filter_out_matching_any_ignored_test_case_glob( self, test_case_names: Set[str] ) -> Set[str]: result = ( test_case_names.copy()", "Set[ParsedTarget]: 
return { ParsedTarget.from_target(target, default_test_suite_glob) for target in targets } def filter_out_ignored_test_suites( self,", "from logging import Logger from pathlib import Path from time import time from", "path.is_dir(): results.update(self._find_test_suite_paths_in_dir(path)) elif path.is_file() and TestCollector.is_test_suite(path.name): results.add(path) return results def _find_test_suite_paths_in_dir(self, path: Path)", "= target test_case_glob: Optional[TestCaseGlob] = None if \"::\" in target: (test_suite_glob, test_case_glob) =", "starknet_compiler supported_test_suite_filename_patterns = [ re.compile(r\"^test_.*\\.cairo\"), re.compile(r\"^.*_test.cairo\"), ] @classmethod def is_test_suite(cls, filename: str) ->", "protostar.commands.test.test_suite import TestSuite from protostar.protostar_exception import ProtostarException from protostar.utils.starknet_compilation import StarknetCompiler TestSuiteGlob =", "result: Set[str] = set() for test_case_name in test_case_names: for test_case_glob in self.test_case_globs: if", "test_suite_glob: TestSuiteGlob test_case_glob: TestCaseGlob @classmethod def from_target( cls, target: Target, default_test_suite_glob: Optional[TestSuiteGlob] ):", "return { ParsedTarget.from_target(target, default_test_suite_glob) for target in targets } def filter_out_ignored_test_suites( self, test_case_globs_dict:", "try: return self._starknet_compiler.preprocess_contract(file_path) except PreprocessorError as p_err: print(p_err) raise TestCollectingException(\"Failed to collect test", "= None if \"::\" in target: (test_suite_glob, test_case_glob) = target.split(\"::\") test_suite_glob = test_suite_glob", "Set[TestCaseGlob] ignored_test_case_globs: Set[TestCaseGlob] def match_test_case_names(self, test_case_names: List[str]) -> List[str]: matches = self._find_matching_any_test_case_glob(test_case_names) result", "for ignored_test_case_glob in self.ignored_test_case_globs: if fnmatch(test_case_name, ignored_test_case_glob): 
result.remove(test_case_name) break return result TestSuiteInfoDict =", "\"\"\"e.g. `tests/**/::test_*`\"\"\" TestCaseGlobsDict = Dict[TestSuitePath, Set[TestCaseGlob]] @dataclass(frozen=True) class ParsedTarget: test_suite_glob: TestSuiteGlob test_case_glob: TestCaseGlob", "Logger from pathlib import Path from time import time from typing import Dict,", "parsed_targets: test_suite_paths = self._find_test_suite_paths_from_glob( parsed_target.test_suite_glob ) for test_suite_path in test_suite_paths: results[test_suite_path].add(parsed_target.test_case_glob) return results", "in self.test_case_globs: if fnmatch(test_case_name, test_case_glob): result.add(test_case_name) return result def _filter_out_matching_any_ignored_test_case_glob( self, test_case_names: Set[str]", "self.duration = duration def log(self, logger: Logger): if self.test_cases_count: result: List[str] = [\"Collected\"]", "TestCaseGlobsDict: results: TestCaseGlobsDict = defaultdict(set) for parsed_target in parsed_targets: test_suite_paths = self._find_test_suite_paths_from_glob( parsed_target.test_suite_glob", "TestCollector: class Result: def __init__(self, test_suites: List[TestSuite], duration: float = 0.0) -> None:", "TestCollector.Result( test_suites=non_empty_test_suites, duration=end_time - start_time ) def build_test_case_globs_dict( self, parsed_targets: Set[ParsedTarget], ) ->", "return results def parse_targets( self, targets: Set[Target], default_test_suite_glob: Optional[str] = None ) ->", "class TestCollector: class Result: def __init__(self, test_suites: List[TestSuite], duration: float = 0.0) ->", "import glob from logging import Logger from pathlib import Path from time import", "in ignored_test_case_globs_dict: test_suite_info.ignored_test_case_globs = ignored_test_case_globs_dict[ test_suite_path ] return result def _find_test_suite_paths_from_glob( self, test_suite_glob:", "TestSuite: preprocessed = self._preprocess_contract(test_suite_info.path) collected_test_case_names = 
self._collect_test_case_names(preprocessed) matching_test_case_names = test_suite_info.match_test_case_names( collected_test_case_names ) return", "TestSuitePath = Path TestCaseGlob = str Target = str \"\"\"e.g. `tests/**/::test_*`\"\"\" TestCaseGlobsDict =", "result.add(test_case_name) return result def _filter_out_matching_any_ignored_test_case_glob( self, test_case_names: Set[str] ) -> Set[str]: result =", "if suites_count == 1: result.append(\"1 suite,\") else: result.append(f\"{suites_count} suites,\") result.append(\"and\") if self.test_cases_count ==", "results.add(path) return results def _find_test_suite_paths_in_dir(self, path: Path) -> Set[TestSuitePath]: filepaths = set(glob(f\"{path}/**/*.cairo\", recursive=True))", "test_case_glob: test_case_glob = \"*\" return cls(test_suite_glob, test_case_glob) @dataclass class TestSuiteInfo: path: Path test_case_globs:", "] def _build_test_suite_from_test_suite_info( self, test_suite_info: TestSuiteInfo, ) -> TestSuite: preprocessed = self._preprocess_contract(test_suite_info.path) collected_test_case_names", "test_suite_glob: Optional[TestSuiteGlob] = target test_case_glob: Optional[TestCaseGlob] = None if \"::\" in target: (test_suite_glob,", "set() matches = glob(test_suite_glob, recursive=True) for match in matches: path = Path(match) if", "Set[TestCaseGlob] def match_test_case_names(self, test_case_names: List[str]) -> List[str]: matches = self._find_matching_any_test_case_glob(test_case_names) result = self._filter_out_matching_any_ignored_test_case_glob(matches)", "= len(self.test_suites) if suites_count == 1: result.append(\"1 suite,\") else: result.append(f\"{suites_count} suites,\") result.append(\"and\") if", "\".join(result)) else: logger.warning(\"No cases found\") def __init__( self, starknet_compiler: StarknetCompiler, ) -> None:", "cls.supported_test_suite_filename_patterns ) def collect( self, targets: List[Target], ignored_targets: Optional[List[Target]] = None, default_test_suite_glob: 
Optional[str]", "= [\"Collected\"] suites_count = len(self.test_suites) if suites_count == 1: result.append(\"1 suite,\") else: result.append(f\"{suites_count}", "str Target = str \"\"\"e.g. `tests/**/::test_*`\"\"\" TestCaseGlobsDict = Dict[TestSuitePath, Set[TestCaseGlob]] @dataclass(frozen=True) class ParsedTarget:", ") end_time = time() return TestCollector.Result( test_suites=non_empty_test_suites, duration=end_time - start_time ) def build_test_case_globs_dict(", "fn_name: fn_name.startswith(\"test_\") ) def _find_setup_hook_name( self, preprocessed: StarknetPreprocessedProgram ) -> Optional[str]: function_names =", "suite,\") else: result.append(f\"{suites_count} suites,\") result.append(\"and\") if self.test_cases_count == 1: result.append(\"1 test case\") else:", "Path) -> StarknetPreprocessedProgram: try: return self._starknet_compiler.preprocess_contract(file_path) except PreprocessorError as p_err: print(p_err) raise TestCollectingException(\"Failed", "for test_suite_path in test_case_globs_dict: test_suite_info = result.setdefault( test_suite_path, TestSuiteInfo( test_case_globs=set(), ignored_test_case_globs=set(), path=test_suite_path, ),", "def _find_matching_any_test_case_glob(self, test_case_names: List[str]) -> Set[str]: result: Set[str] = set() for test_case_name in", "not test_case_glob: test_case_glob = \"*\" return cls(test_suite_glob, test_case_glob) @dataclass class TestSuiteInfo: path: Path", "results.add(path) return results def _build_test_suites_from_test_suite_info_dict( self, test_suite_info_dict: TestSuiteInfoDict, ) -> List[TestSuite]: return [", "= ( test_case_names.copy() ) # copy prevents changing lengths of this collection during", "self.test_cases_count = sum( [len(test_suite.test_case_names) for test_suite in test_suites] ) self.duration = duration def", "preprocessed = self._preprocess_contract(test_suite_info.path) collected_test_case_names = self._collect_test_case_names(preprocessed) matching_test_case_names = 
test_suite_info.match_test_case_names( collected_test_case_names ) return TestSuite(", "test_case_glob) @dataclass class TestSuiteInfo: path: Path test_case_globs: Set[TestCaseGlob] ignored_test_case_globs: Set[TestCaseGlob] def match_test_case_names(self, test_case_names:", "target test_case_glob: Optional[TestCaseGlob] = None if \"::\" in target: (test_suite_glob, test_case_glob) = target.split(\"::\")", "targets: Set[Target], default_test_suite_glob: Optional[str] = None ) -> Set[ParsedTarget]: return { ParsedTarget.from_target(target, default_test_suite_glob)", "# copy prevents changing lengths of this collection during loop execution for test_case_name", "self.test_suites = test_suites self.test_cases_count = sum( [len(test_suite.test_case_names) for test_suite in test_suites] ) self.duration", "ignored_test_case_globs_dict, ) test_suite_info_dict = self.build_test_suite_info_dict( filtered_test_case_globs_dict, ignored_test_case_globs_dict, ) test_suites = self._build_test_suites_from_test_suite_info_dict( test_suite_info_dict )", "Set[str]: result: Set[str] = set() for test_case_name in test_case_names: for test_case_glob in self.test_case_globs:", "result.setdefault( test_suite_path, TestSuiteInfo( test_case_globs=set(), ignored_test_case_globs=set(), path=test_suite_path, ), ) test_suite_info.test_case_globs = test_case_globs_dict[test_suite_path] if test_suite_path", "[]), default_test_suite_glob ) test_case_globs_dict = self.build_test_case_globs_dict(parsed_targets) ignored_test_case_globs_dict = self.build_test_case_globs_dict( ignored_parsed_targets ) filtered_test_case_globs_dict =", "None, ) -> \"TestCollector.Result\": start_time = time() parsed_targets = self.parse_targets(set(targets), default_test_suite_glob) ignored_parsed_targets =", "test_suite_paths = self._find_test_suite_paths_from_glob( parsed_target.test_suite_glob ) for test_suite_path in test_suite_paths: results[test_suite_path].add(parsed_target.test_case_glob) return results def", 
"@classmethod def from_target( cls, target: Target, default_test_suite_glob: Optional[TestSuiteGlob] ): test_suite_glob: Optional[TestSuiteGlob] = target", "return results def _build_test_suites_from_test_suite_info_dict( self, test_suite_info_dict: TestSuiteInfoDict, ) -> List[TestSuite]: return [ self._build_test_suite_from_test_suite_info(", "time from typing import Dict, List, Optional, Set from starkware.cairo.lang.compiler.preprocessor.preprocessor_error import ( PreprocessorError,", "Path test_case_globs: Set[TestCaseGlob] ignored_test_case_globs: Set[TestCaseGlob] def match_test_case_names(self, test_case_names: List[str]) -> List[str]: matches =", "Optional[List[Target]] = None, default_test_suite_glob: Optional[str] = None, ) -> \"TestCollector.Result\": start_time = time()", "Dict[TestSuitePath, TestSuiteInfo] class TestCollectingException(ProtostarException): pass @dataclass class TestCollector: class Result: def __init__(self, test_suites:", "match_test_case_names(self, test_case_names: List[str]) -> List[str]: matches = self._find_matching_any_test_case_glob(test_case_names) result = self._filter_out_matching_any_ignored_test_case_glob(matches) return list(result)", "results: Set[Path] = set() matches = glob(test_suite_glob, recursive=True) for match in matches: path", "= result.setdefault( test_suite_path, TestSuiteInfo( test_case_globs=set(), ignored_test_case_globs=set(), path=test_suite_path, ), ) test_suite_info.test_case_globs = test_case_globs_dict[test_suite_path] if", ") for test_suite_path in test_suite_paths: results[test_suite_path].add(parsed_target.test_case_glob) return results def parse_targets( self, targets: Set[Target],", "return result def _find_test_suite_paths_from_glob( self, test_suite_glob: str ) -> Set[TestSuitePath]: results: Set[Path] =", "= self.build_test_case_globs_dict(parsed_targets) ignored_test_case_globs_dict = self.build_test_case_globs_dict( ignored_parsed_targets ) filtered_test_case_globs_dict = 
self.filter_out_ignored_test_suites( test_case_globs_dict, ignored_test_case_globs_dict, )", "ignored_test_case_globs_dict[ignored_target_path] and ignored_target_path in result ): del result[ignored_target_path] return result def build_test_suite_info_dict( self,", "ignored_test_case_globs_dict[ test_suite_path ] return result def _find_test_suite_paths_from_glob( self, test_suite_glob: str ) -> Set[TestSuitePath]:", "None def _preprocess_contract(self, file_path: Path) -> StarknetPreprocessedProgram: try: return self._starknet_compiler.preprocess_contract(file_path) except PreprocessorError as", ") -> List[str]: return self._starknet_compiler.get_function_names( preprocessed, predicate=lambda fn_name: fn_name.startswith(\"test_\") ) def _find_setup_hook_name( self,", "TestCaseGlobsDict: result = test_case_globs_dict.copy() for ignored_target_path in ignored_test_case_globs_dict: if ( \"*\" in ignored_test_case_globs_dict[ignored_target_path]", "targets } def filter_out_ignored_test_suites( self, test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict, ) -> TestCaseGlobsDict: result", "def _find_test_suite_paths_in_dir(self, path: Path) -> Set[TestSuitePath]: filepaths = set(glob(f\"{path}/**/*.cairo\", recursive=True)) results: Set[Path] =", "target: (test_suite_glob, test_case_glob) = target.split(\"::\") test_suite_glob = test_suite_glob or default_test_suite_glob or \".\" if", "= str \"\"\"e.g. 
`tests/**/::test_*`\"\"\" TestCaseGlobsDict = Dict[TestSuitePath, Set[TestCaseGlob]] @dataclass(frozen=True) class ParsedTarget: test_suite_glob: TestSuiteGlob", "def __init__(self, test_suites: List[TestSuite], duration: float = 0.0) -> None: self.test_suites = test_suites", ") -> TestCaseGlobsDict: results: TestCaseGlobsDict = defaultdict(set) for parsed_target in parsed_targets: test_suite_paths =", "self._find_test_suite_paths_from_glob( parsed_target.test_suite_glob ) for test_suite_path in test_suite_paths: results[test_suite_path].add(parsed_target.test_case_glob) return results def parse_targets( self,", "self.build_test_case_globs_dict( ignored_parsed_targets ) filtered_test_case_globs_dict = self.filter_out_ignored_test_suites( test_case_globs_dict, ignored_test_case_globs_dict, ) test_suite_info_dict = self.build_test_suite_info_dict( filtered_test_case_globs_dict,", "collect( self, targets: List[Target], ignored_targets: Optional[List[Target]] = None, default_test_suite_glob: Optional[str] = None, )", "default_test_suite_glob ) test_case_globs_dict = self.build_test_case_globs_dict(parsed_targets) ignored_test_case_globs_dict = self.build_test_case_globs_dict( ignored_parsed_targets ) filtered_test_case_globs_dict = self.filter_out_ignored_test_suites(", "test_suite_path in test_suite_paths: results[test_suite_path].add(parsed_target.test_case_glob) return results def parse_targets( self, targets: Set[Target], default_test_suite_glob: Optional[str]", "-> Set[TestSuitePath]: results: Set[Path] = set() matches = glob(test_suite_glob, recursive=True) for match in", "preprocessed: StarknetPreprocessedProgram ) -> List[str]: return self._starknet_compiler.get_function_names( preprocessed, predicate=lambda fn_name: fn_name.startswith(\"test_\") ) def", "glob import glob from logging import Logger from pathlib import Path from time", ") non_empty_test_suites = list( filter(lambda test_file: (test_file.test_case_names) != [], test_suites) ) end_time =", "return 
self._starknet_compiler.get_function_names( preprocessed, predicate=lambda fn_name: fn_name.startswith(\"test_\") ) def _find_setup_hook_name( self, preprocessed: StarknetPreprocessedProgram )", "typing import Dict, List, Optional, Set from starkware.cairo.lang.compiler.preprocessor.preprocessor_error import ( PreprocessorError, ) from", "( test_case_names.copy() ) # copy prevents changing lengths of this collection during loop", "preprocessed, predicate=lambda fn_name: fn_name.startswith(\"test_\") ) def _find_setup_hook_name( self, preprocessed: StarknetPreprocessedProgram ) -> Optional[str]:", "if fnmatch(test_case_name, ignored_test_case_glob): result.remove(test_case_name) break return result TestSuiteInfoDict = Dict[TestSuitePath, TestSuiteInfo] class TestCollectingException(ProtostarException):", "None, default_test_suite_glob: Optional[str] = None, ) -> \"TestCollector.Result\": start_time = time() parsed_targets =", "[len(test_suite.test_case_names) for test_suite in test_suites] ) self.duration = duration def log(self, logger: Logger):", "duration: float = 0.0) -> None: self.test_suites = test_suites self.test_cases_count = sum( [len(test_suite.test_case_names)", "def _filter_out_matching_any_ignored_test_case_glob( self, test_case_names: Set[str] ) -> Set[str]: result = ( test_case_names.copy() )", "test case\") else: result.append(f\"{self.test_cases_count} test cases\") result.append(f\"({self.duration:.3f} s)\") logger.info(\" \".join(result)) else: logger.warning(\"No cases", "TestSuiteInfo] class TestCollectingException(ProtostarException): pass @dataclass class TestCollector: class Result: def __init__(self, test_suites: List[TestSuite],", "results[test_suite_path].add(parsed_target.test_case_glob) return results def parse_targets( self, targets: Set[Target], default_test_suite_glob: Optional[str] = None )", "_find_test_suite_paths_in_dir(self, path: Path) -> Set[TestSuitePath]: filepaths = set(glob(f\"{path}/**/*.cairo\", recursive=True)) results: 
Set[Path] = set()", "(test_suite_glob, test_case_glob) = target.split(\"::\") test_suite_glob = test_suite_glob or default_test_suite_glob or \".\" if not", "for test_suite_info in test_suite_info_dict.values() ] def _build_test_suite_from_test_suite_info( self, test_suite_info: TestSuiteInfo, ) -> TestSuite:", "fn_name.startswith(\"test_\") ) def _find_setup_hook_name( self, preprocessed: StarknetPreprocessedProgram ) -> Optional[str]: function_names = self._starknet_compiler.get_function_names(", "test_suite_path in ignored_test_case_globs_dict: test_suite_info.ignored_test_case_globs = ignored_test_case_globs_dict[ test_suite_path ] return result def _find_test_suite_paths_from_glob( self,", "] return result def _find_test_suite_paths_from_glob( self, test_suite_glob: str ) -> Set[TestSuitePath]: results: Set[Path]", "Target, default_test_suite_glob: Optional[TestSuiteGlob] ): test_suite_glob: Optional[TestSuiteGlob] = target test_case_glob: Optional[TestCaseGlob] = None if", "Optional[str]: function_names = self._starknet_compiler.get_function_names( preprocessed, predicate=lambda fn_name: fn_name == \"__setup__\" ) return function_names[0]", "test_suite_path in test_case_globs_dict: test_suite_info = result.setdefault( test_suite_path, TestSuiteInfo( test_case_globs=set(), ignored_test_case_globs=set(), path=test_suite_path, ), )", "List[str]) -> List[str]: matches = self._find_matching_any_test_case_glob(test_case_names) result = self._filter_out_matching_any_ignored_test_case_glob(matches) return list(result) def _find_matching_any_test_case_glob(self,", "self, test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict, ) -> TestCaseGlobsDict: result = test_case_globs_dict.copy() for ignored_target_path", "test_suite_info_dict = self.build_test_suite_info_dict( filtered_test_case_globs_dict, ignored_test_case_globs_dict, ) test_suites = self._build_test_suites_from_test_suite_info_dict( test_suite_info_dict ) 
non_empty_test_suites =", "ignored_test_case_globs_dict, ) test_suites = self._build_test_suites_from_test_suite_info_dict( test_suite_info_dict ) non_empty_test_suites = list( filter(lambda test_file: (test_file.test_case_names)", "-> TestCaseGlobsDict: results: TestCaseGlobsDict = defaultdict(set) for parsed_target in parsed_targets: test_suite_paths = self._find_test_suite_paths_from_glob(", "= duration def log(self, logger: Logger): if self.test_cases_count: result: List[str] = [\"Collected\"] suites_count", "from_target( cls, target: Target, default_test_suite_glob: Optional[TestSuiteGlob] ): test_suite_glob: Optional[TestSuiteGlob] = target test_case_glob: Optional[TestCaseGlob]", "recursive=True)) results: Set[Path] = set() for filepath in filepaths: path = Path(filepath) if", ") def collect( self, targets: List[Target], ignored_targets: Optional[List[Target]] = None, default_test_suite_glob: Optional[str] =", "starkware.cairo.lang.compiler.preprocessor.preprocessor_error import ( PreprocessorError, ) from starkware.starknet.compiler.starknet_preprocessor import ( StarknetPreprocessedProgram, ) from protostar.commands.test.test_suite", "= None ) -> Set[ParsedTarget]: return { ParsedTarget.from_target(target, default_test_suite_glob) for target in targets", "or \".\" if not test_case_glob: test_case_glob = \"*\" return cls(test_suite_glob, test_case_glob) @dataclass class", "time() parsed_targets = self.parse_targets(set(targets), default_test_suite_glob) ignored_parsed_targets = self.parse_targets( set(ignored_targets or []), default_test_suite_glob )", "function_names[0] if len(function_names) > 0 else None def _preprocess_contract(self, file_path: Path) -> StarknetPreprocessedProgram:", "): test_suite_glob: Optional[TestSuiteGlob] = target test_case_glob: Optional[TestCaseGlob] = None if \"::\" in target:", ") def build_test_case_globs_dict( self, parsed_targets: Set[ParsedTarget], ) -> TestCaseGlobsDict: results: TestCaseGlobsDict = defaultdict(set)", 
"Set[str]: result = ( test_case_names.copy() ) # copy prevents changing lengths of this", "else: result.append(f\"{suites_count} suites,\") result.append(\"and\") if self.test_cases_count == 1: result.append(\"1 test case\") else: result.append(f\"{self.test_cases_count}", "self.parse_targets( set(ignored_targets or []), default_test_suite_glob ) test_case_globs_dict = self.build_test_case_globs_dict(parsed_targets) ignored_test_case_globs_dict = self.build_test_case_globs_dict( ignored_parsed_targets", "Set[Target], default_test_suite_glob: Optional[str] = None ) -> Set[ParsedTarget]: return { ParsedTarget.from_target(target, default_test_suite_glob) for", "path = Path(match) if path.is_dir(): results.update(self._find_test_suite_paths_in_dir(path)) elif path.is_file() and TestCollector.is_test_suite(path.name): results.add(path) return results", "collected_test_case_names = self._collect_test_case_names(preprocessed) matching_test_case_names = test_suite_info.match_test_case_names( collected_test_case_names ) return TestSuite( test_path=test_suite_info.path, test_case_names=matching_test_case_names, preprocessed_contract=preprocessed,", "import fnmatch from glob import glob from logging import Logger from pathlib import", "test_suite_glob or default_test_suite_glob or \".\" if not test_case_glob: test_case_glob = \"*\" return cls(test_suite_glob,", "= time() parsed_targets = self.parse_targets(set(targets), default_test_suite_glob) ignored_parsed_targets = self.parse_targets( set(ignored_targets or []), default_test_suite_glob", ") -> TestCaseGlobsDict: result = test_case_globs_dict.copy() for ignored_target_path in ignored_test_case_globs_dict: if ( \"*\"", "self._starknet_compiler.get_function_names( preprocessed, predicate=lambda fn_name: fn_name == \"__setup__\" ) return function_names[0] if len(function_names) >", "this collection during loop execution for test_case_name in test_case_names: for ignored_test_case_glob in self.ignored_test_case_globs:", 
"result: List[str] = [\"Collected\"] suites_count = len(self.test_suites) if suites_count == 1: result.append(\"1 suite,\")", "break return result TestSuiteInfoDict = Dict[TestSuitePath, TestSuiteInfo] class TestCollectingException(ProtostarException): pass @dataclass class TestCollector:", "= test_suite_info.match_test_case_names( collected_test_case_names ) return TestSuite( test_path=test_suite_info.path, test_case_names=matching_test_case_names, preprocessed_contract=preprocessed, setup_fn_name=self._find_setup_hook_name(preprocessed), ) def _collect_test_case_names(", "Optional[str] = None, ) -> \"TestCollector.Result\": start_time = time() parsed_targets = self.parse_targets(set(targets), default_test_suite_glob)", ") for test_suite_info in test_suite_info_dict.values() ] def _build_test_suite_from_test_suite_info( self, test_suite_info: TestSuiteInfo, ) ->", "ProtostarException from protostar.utils.starknet_compilation import StarknetCompiler TestSuiteGlob = str TestSuitePath = Path TestCaseGlob =", "-> TestSuite: preprocessed = self._preprocess_contract(test_suite_info.path) collected_test_case_names = self._collect_test_case_names(preprocessed) matching_test_case_names = test_suite_info.match_test_case_names( collected_test_case_names )", ") return TestSuite( test_path=test_suite_info.path, test_case_names=matching_test_case_names, preprocessed_contract=preprocessed, setup_fn_name=self._find_setup_hook_name(preprocessed), ) def _collect_test_case_names( self, preprocessed: StarknetPreprocessedProgram", "logger.warning(\"No cases found\") def __init__( self, starknet_compiler: StarknetCompiler, ) -> None: self._starknet_compiler =", "TestCaseGlob = str Target = str \"\"\"e.g. 
`tests/**/::test_*`\"\"\" TestCaseGlobsDict = Dict[TestSuitePath, Set[TestCaseGlob]] @dataclass(frozen=True)", "import ( PreprocessorError, ) from starkware.starknet.compiler.starknet_preprocessor import ( StarknetPreprocessedProgram, ) from protostar.commands.test.test_suite import", "def _find_setup_hook_name( self, preprocessed: StarknetPreprocessedProgram ) -> Optional[str]: function_names = self._starknet_compiler.get_function_names( preprocessed, predicate=lambda", "self._filter_out_matching_any_ignored_test_case_glob(matches) return list(result) def _find_matching_any_test_case_glob(self, test_case_names: List[str]) -> Set[str]: result: Set[str] = set()", "result.remove(test_case_name) break return result TestSuiteInfoDict = Dict[TestSuitePath, TestSuiteInfo] class TestCollectingException(ProtostarException): pass @dataclass class", "test_suite in test_suites] ) self.duration = duration def log(self, logger: Logger): if self.test_cases_count:", "return any( test_re.match(filename) for test_re in cls.supported_test_suite_filename_patterns ) def collect( self, targets: List[Target],", "def _find_test_suite_paths_from_glob( self, test_suite_glob: str ) -> Set[TestSuitePath]: results: Set[Path] = set() matches", "filepaths = set(glob(f\"{path}/**/*.cairo\", recursive=True)) results: Set[Path] = set() for filepath in filepaths: path", "s)\") logger.info(\" \".join(result)) else: logger.warning(\"No cases found\") def __init__( self, starknet_compiler: StarknetCompiler, )", "List[str]) -> Set[str]: result: Set[str] = set() for test_case_name in test_case_names: for test_case_glob", ") test_suite_info_dict = self.build_test_suite_info_dict( filtered_test_case_globs_dict, ignored_test_case_globs_dict, ) test_suites = self._build_test_suites_from_test_suite_info_dict( test_suite_info_dict ) non_empty_test_suites", "-> None: self.test_suites = test_suites self.test_cases_count = sum( [len(test_suite.test_case_names) for test_suite in test_suites]", "recursive=True) 
for match in matches: path = Path(match) if path.is_dir(): results.update(self._find_test_suite_paths_in_dir(path)) elif path.is_file()", "= self.parse_targets(set(targets), default_test_suite_glob) ignored_parsed_targets = self.parse_targets( set(ignored_targets or []), default_test_suite_glob ) test_case_globs_dict =", "path=test_suite_path, ), ) test_suite_info.test_case_globs = test_case_globs_dict[test_suite_path] if test_suite_path in ignored_test_case_globs_dict: test_suite_info.ignored_test_case_globs = ignored_test_case_globs_dict[", "path: Path test_case_globs: Set[TestCaseGlob] ignored_test_case_globs: Set[TestCaseGlob] def match_test_case_names(self, test_case_names: List[str]) -> List[str]: matches", "__init__( self, starknet_compiler: StarknetCompiler, ) -> None: self._starknet_compiler = starknet_compiler supported_test_suite_filename_patterns = [", "test_case_globs_dict = self.build_test_case_globs_dict(parsed_targets) ignored_test_case_globs_dict = self.build_test_case_globs_dict( ignored_parsed_targets ) filtered_test_case_globs_dict = self.filter_out_ignored_test_suites( test_case_globs_dict, ignored_test_case_globs_dict,", "return list(result) def _find_matching_any_test_case_glob(self, test_case_names: List[str]) -> Set[str]: result: Set[str] = set() for", "self, parsed_targets: Set[ParsedTarget], ) -> TestCaseGlobsDict: results: TestCaseGlobsDict = defaultdict(set) for parsed_target in", "Set[Path] = set() for filepath in filepaths: path = Path(filepath) if TestCollector.is_test_suite(path.name): results.add(path)", "<filename>protostar/commands/test/test_collector.py # pylint: disable=no-self-use import re from collections import defaultdict from dataclasses import", "str) -> bool: return any( test_re.match(filename) for test_re in cls.supported_test_suite_filename_patterns ) def collect(", "Path(filepath) if TestCollector.is_test_suite(path.name): results.add(path) return results def _build_test_suites_from_test_suite_info_dict( self, 
test_suite_info_dict: TestSuiteInfoDict, ) ->", "def from_target( cls, target: Target, default_test_suite_glob: Optional[TestSuiteGlob] ): test_suite_glob: Optional[TestSuiteGlob] = target test_case_glob:", "test_suite_info_dict ) non_empty_test_suites = list( filter(lambda test_file: (test_file.test_case_names) != [], test_suites) ) end_time", "in test_suite_info_dict.values() ] def _build_test_suite_from_test_suite_info( self, test_suite_info: TestSuiteInfo, ) -> TestSuite: preprocessed =", "test_suites) ) end_time = time() return TestCollector.Result( test_suites=non_empty_test_suites, duration=end_time - start_time ) def", "-> Optional[str]: function_names = self._starknet_compiler.get_function_names( preprocessed, predicate=lambda fn_name: fn_name == \"__setup__\" ) return", "= self._find_test_suite_paths_from_glob( parsed_target.test_suite_glob ) for test_suite_path in test_suite_paths: results[test_suite_path].add(parsed_target.test_case_glob) return results def parse_targets(", "defaultdict(set) for parsed_target in parsed_targets: test_suite_paths = self._find_test_suite_paths_from_glob( parsed_target.test_suite_glob ) for test_suite_path in", "self.test_cases_count: result: List[str] = [\"Collected\"] suites_count = len(self.test_suites) if suites_count == 1: result.append(\"1", "from collections import defaultdict from dataclasses import dataclass from fnmatch import fnmatch from", "= Dict[TestSuitePath, TestSuiteInfo] class TestCollectingException(ProtostarException): pass @dataclass class TestCollector: class Result: def __init__(self,", "Optional[str] = None ) -> Set[ParsedTarget]: return { ParsedTarget.from_target(target, default_test_suite_glob) for target in", "changing lengths of this collection during loop execution for test_case_name in test_case_names: for", "def is_test_suite(cls, filename: str) -> bool: return any( test_re.match(filename) for test_re in cls.supported_test_suite_filename_patterns", "bool: return any( test_re.match(filename) 
for test_re in cls.supported_test_suite_filename_patterns ) def collect( self, targets:", "def _preprocess_contract(self, file_path: Path) -> StarknetPreprocessedProgram: try: return self._starknet_compiler.preprocess_contract(file_path) except PreprocessorError as p_err:", "StarknetPreprocessedProgram, ) from protostar.commands.test.test_suite import TestSuite from protostar.protostar_exception import ProtostarException from protostar.utils.starknet_compilation import", "List[str]: matches = self._find_matching_any_test_case_glob(test_case_names) result = self._filter_out_matching_any_ignored_test_case_glob(matches) return list(result) def _find_matching_any_test_case_glob(self, test_case_names: List[str])", "test_case_name in test_case_names: for ignored_test_case_glob in self.ignored_test_case_globs: if fnmatch(test_case_name, ignored_test_case_glob): result.remove(test_case_name) break return", ") from starkware.starknet.compiler.starknet_preprocessor import ( StarknetPreprocessedProgram, ) from protostar.commands.test.test_suite import TestSuite from protostar.protostar_exception", "-> Set[str]: result: Set[str] = set() for test_case_name in test_case_names: for test_case_glob in", "return TestCollector.Result( test_suites=non_empty_test_suites, duration=end_time - start_time ) def build_test_case_globs_dict( self, parsed_targets: Set[ParsedTarget], )", "test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict, ) -> TestSuiteInfoDict: result: TestSuiteInfoDict = {} for test_suite_path", "@dataclass(frozen=True) class ParsedTarget: test_suite_glob: TestSuiteGlob test_case_glob: TestCaseGlob @classmethod def from_target( cls, target: Target,", "-> Set[TestSuitePath]: filepaths = set(glob(f\"{path}/**/*.cairo\", recursive=True)) results: Set[Path] = set() for filepath in", "results: TestCaseGlobsDict = defaultdict(set) for parsed_target in parsed_targets: test_suite_paths = self._find_test_suite_paths_from_glob( 
parsed_target.test_suite_glob )", ") filtered_test_case_globs_dict = self.filter_out_ignored_test_suites( test_case_globs_dict, ignored_test_case_globs_dict, ) test_suite_info_dict = self.build_test_suite_info_dict( filtered_test_case_globs_dict, ignored_test_case_globs_dict, )", "from starkware.cairo.lang.compiler.preprocessor.preprocessor_error import ( PreprocessorError, ) from starkware.starknet.compiler.starknet_preprocessor import ( StarknetPreprocessedProgram, ) from", "parse_targets( self, targets: Set[Target], default_test_suite_glob: Optional[str] = None ) -> Set[ParsedTarget]: return {", "is_test_suite(cls, filename: str) -> bool: return any( test_re.match(filename) for test_re in cls.supported_test_suite_filename_patterns )", "Set[TestSuitePath]: results: Set[Path] = set() matches = glob(test_suite_glob, recursive=True) for match in matches:", "= Dict[TestSuitePath, Set[TestCaseGlob]] @dataclass(frozen=True) class ParsedTarget: test_suite_glob: TestSuiteGlob test_case_glob: TestCaseGlob @classmethod def from_target(", "test_case_glob in self.test_case_globs: if fnmatch(test_case_name, test_case_glob): result.add(test_case_name) return result def _filter_out_matching_any_ignored_test_case_glob( self, test_case_names:", "TestCollector.is_test_suite(path.name): results.add(path) return results def _build_test_suites_from_test_suite_info_dict( self, test_suite_info_dict: TestSuiteInfoDict, ) -> List[TestSuite]: return", "ignored_test_case_glob): result.remove(test_case_name) break return result TestSuiteInfoDict = Dict[TestSuitePath, TestSuiteInfo] class TestCollectingException(ProtostarException): pass @dataclass", "from typing import Dict, List, Optional, Set from starkware.cairo.lang.compiler.preprocessor.preprocessor_error import ( PreprocessorError, )", "test_case_names: List[str]) -> List[str]: matches = self._find_matching_any_test_case_glob(test_case_names) result = self._filter_out_matching_any_ignored_test_case_glob(matches) return list(result) 
def", "ignored_test_case_globs: Set[TestCaseGlob] def match_test_case_names(self, test_case_names: List[str]) -> List[str]: matches = self._find_matching_any_test_case_glob(test_case_names) result =", "-> None: self._starknet_compiler = starknet_compiler supported_test_suite_filename_patterns = [ re.compile(r\"^test_.*\\.cairo\"), re.compile(r\"^.*_test.cairo\"), ] @classmethod def", "test_suite_info in test_suite_info_dict.values() ] def _build_test_suite_from_test_suite_info( self, test_suite_info: TestSuiteInfo, ) -> TestSuite: preprocessed", "= str Target = str \"\"\"e.g. `tests/**/::test_*`\"\"\" TestCaseGlobsDict = Dict[TestSuitePath, Set[TestCaseGlob]] @dataclass(frozen=True) class", "return results def _find_test_suite_paths_in_dir(self, path: Path) -> Set[TestSuitePath]: filepaths = set(glob(f\"{path}/**/*.cairo\", recursive=True)) results:", "import Path from time import time from typing import Dict, List, Optional, Set", "test_suite_path ] return result def _find_test_suite_paths_from_glob( self, test_suite_glob: str ) -> Set[TestSuitePath]: results:", "build_test_case_globs_dict( self, parsed_targets: Set[ParsedTarget], ) -> TestCaseGlobsDict: results: TestCaseGlobsDict = defaultdict(set) for parsed_target", "ignored_test_case_globs_dict: if ( \"*\" in ignored_test_case_globs_dict[ignored_target_path] and ignored_target_path in result ): del result[ignored_target_path]", "matches: path = Path(match) if path.is_dir(): results.update(self._find_test_suite_paths_in_dir(path)) elif path.is_file() and TestCollector.is_test_suite(path.name): results.add(path) return", "Path) -> Set[TestSuitePath]: filepaths = set(glob(f\"{path}/**/*.cairo\", recursive=True)) results: Set[Path] = set() for filepath", "matching_test_case_names = test_suite_info.match_test_case_names( collected_test_case_names ) return TestSuite( test_path=test_suite_info.path, test_case_names=matching_test_case_names, preprocessed_contract=preprocessed, 
setup_fn_name=self._find_setup_hook_name(preprocessed), ) def", "== 1: result.append(\"1 suite,\") else: result.append(f\"{suites_count} suites,\") result.append(\"and\") if self.test_cases_count == 1: result.append(\"1", "_collect_test_case_names( self, preprocessed: StarknetPreprocessedProgram ) -> List[str]: return self._starknet_compiler.get_function_names( preprocessed, predicate=lambda fn_name: fn_name.startswith(\"test_\")", "for target in targets } def filter_out_ignored_test_suites( self, test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict, )", "execution for test_case_name in test_case_names: for ignored_test_case_glob in self.ignored_test_case_globs: if fnmatch(test_case_name, ignored_test_case_glob): result.remove(test_case_name)", "Set[ParsedTarget], ) -> TestCaseGlobsDict: results: TestCaseGlobsDict = defaultdict(set) for parsed_target in parsed_targets: test_suite_paths", "TestSuiteInfoDict: result: TestSuiteInfoDict = {} for test_suite_path in test_case_globs_dict: test_suite_info = result.setdefault( test_suite_path,", "test_case_names=matching_test_case_names, preprocessed_contract=preprocessed, setup_fn_name=self._find_setup_hook_name(preprocessed), ) def _collect_test_case_names( self, preprocessed: StarknetPreprocessedProgram ) -> List[str]: return", "protostar.protostar_exception import ProtostarException from protostar.utils.starknet_compilation import StarknetCompiler TestSuiteGlob = str TestSuitePath = Path", "filter_out_ignored_test_suites( self, test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict, ) -> TestCaseGlobsDict: result = test_case_globs_dict.copy() for", "= self._preprocess_contract(test_suite_info.path) collected_test_case_names = self._collect_test_case_names(preprocessed) matching_test_case_names = test_suite_info.match_test_case_names( collected_test_case_names ) return TestSuite( test_path=test_suite_info.path,", "re.compile(r\"^test_.*\\.cairo\"), 
re.compile(r\"^.*_test.cairo\"), ] @classmethod def is_test_suite(cls, filename: str) -> bool: return any( test_re.match(filename)", "except PreprocessorError as p_err: print(p_err) raise TestCollectingException(\"Failed to collect test cases\") from p_err", "from dataclasses import dataclass from fnmatch import fnmatch from glob import glob from", ") -> Set[str]: result = ( test_case_names.copy() ) # copy prevents changing lengths", "= self._collect_test_case_names(preprocessed) matching_test_case_names = test_suite_info.match_test_case_names( collected_test_case_names ) return TestSuite( test_path=test_suite_info.path, test_case_names=matching_test_case_names, preprocessed_contract=preprocessed, setup_fn_name=self._find_setup_hook_name(preprocessed),", "(test_file.test_case_names) != [], test_suites) ) end_time = time() return TestCollector.Result( test_suites=non_empty_test_suites, duration=end_time -", "__init__(self, test_suites: List[TestSuite], duration: float = 0.0) -> None: self.test_suites = test_suites self.test_cases_count", "Optional[TestSuiteGlob] ): test_suite_glob: Optional[TestSuiteGlob] = target test_case_glob: Optional[TestCaseGlob] = None if \"::\" in", "result[ignored_target_path] return result def build_test_suite_info_dict( self, test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict, ) -> TestSuiteInfoDict:", "ignored_target_path in ignored_test_case_globs_dict: if ( \"*\" in ignored_test_case_globs_dict[ignored_target_path] and ignored_target_path in result ):", "log(self, logger: Logger): if self.test_cases_count: result: List[str] = [\"Collected\"] suites_count = len(self.test_suites) if", "self.build_test_case_globs_dict(parsed_targets) ignored_test_case_globs_dict = self.build_test_case_globs_dict( ignored_parsed_targets ) filtered_test_case_globs_dict = self.filter_out_ignored_test_suites( test_case_globs_dict, ignored_test_case_globs_dict, ) test_suite_info_dict", "in 
ignored_test_case_globs_dict[ignored_target_path] and ignored_target_path in result ): del result[ignored_target_path] return result def build_test_suite_info_dict(", "test_case_glob: Optional[TestCaseGlob] = None if \"::\" in target: (test_suite_glob, test_case_glob) = target.split(\"::\") test_suite_glob", "test_case_names: List[str]) -> Set[str]: result: Set[str] = set() for test_case_name in test_case_names: for", "starkware.starknet.compiler.starknet_preprocessor import ( StarknetPreprocessedProgram, ) from protostar.commands.test.test_suite import TestSuite from protostar.protostar_exception import ProtostarException", "results.update(self._find_test_suite_paths_in_dir(path)) elif path.is_file() and TestCollector.is_test_suite(path.name): results.add(path) return results def _find_test_suite_paths_in_dir(self, path: Path) ->", "results: Set[Path] = set() for filepath in filepaths: path = Path(filepath) if TestCollector.is_test_suite(path.name):", "TestSuite( test_path=test_suite_info.path, test_case_names=matching_test_case_names, preprocessed_contract=preprocessed, setup_fn_name=self._find_setup_hook_name(preprocessed), ) def _collect_test_case_names( self, preprocessed: StarknetPreprocessedProgram ) ->", "test cases\") result.append(f\"({self.duration:.3f} s)\") logger.info(\" \".join(result)) else: logger.warning(\"No cases found\") def __init__( self,", ") test_case_globs_dict = self.build_test_case_globs_dict(parsed_targets) ignored_test_case_globs_dict = self.build_test_case_globs_dict( ignored_parsed_targets ) filtered_test_case_globs_dict = self.filter_out_ignored_test_suites( test_case_globs_dict,", "result.append(\"1 test case\") else: result.append(f\"{self.test_cases_count} test cases\") result.append(f\"({self.duration:.3f} s)\") logger.info(\" \".join(result)) else: logger.warning(\"No", ") self.duration = duration def log(self, logger: Logger): if self.test_cases_count: result: List[str] =", "test_case_glob = \"*\" return cls(test_suite_glob, 
test_case_glob) @dataclass class TestSuiteInfo: path: Path test_case_globs: Set[TestCaseGlob]", "preprocessed_contract=preprocessed, setup_fn_name=self._find_setup_hook_name(preprocessed), ) def _collect_test_case_names( self, preprocessed: StarknetPreprocessedProgram ) -> List[str]: return self._starknet_compiler.get_function_names(", "target: Target, default_test_suite_glob: Optional[TestSuiteGlob] ): test_suite_glob: Optional[TestSuiteGlob] = target test_case_glob: Optional[TestCaseGlob] = None", "test_case_names: for test_case_glob in self.test_case_globs: if fnmatch(test_case_name, test_case_glob): result.add(test_case_name) return result def _filter_out_matching_any_ignored_test_case_glob(", "), ) test_suite_info.test_case_globs = test_case_globs_dict[test_suite_path] if test_suite_path in ignored_test_case_globs_dict: test_suite_info.ignored_test_case_globs = ignored_test_case_globs_dict[ test_suite_path", "self, targets: List[Target], ignored_targets: Optional[List[Target]] = None, default_test_suite_glob: Optional[str] = None, ) ->", "in test_case_names: for ignored_test_case_glob in self.ignored_test_case_globs: if fnmatch(test_case_name, ignored_test_case_glob): result.remove(test_case_name) break return result", "del result[ignored_target_path] return result def build_test_suite_info_dict( self, test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict, ) ->", "target.split(\"::\") test_suite_glob = test_suite_glob or default_test_suite_glob or \".\" if not test_case_glob: test_case_glob =", "= test_case_globs_dict.copy() for ignored_target_path in ignored_test_case_globs_dict: if ( \"*\" in ignored_test_case_globs_dict[ignored_target_path] and ignored_target_path", "= starknet_compiler supported_test_suite_filename_patterns = [ re.compile(r\"^test_.*\\.cairo\"), re.compile(r\"^.*_test.cairo\"), ] @classmethod def is_test_suite(cls, filename: str)", "len(function_names) > 0 else None def _preprocess_contract(self, 
file_path: Path) -> StarknetPreprocessedProgram: try: return", "def __init__( self, starknet_compiler: StarknetCompiler, ) -> None: self._starknet_compiler = starknet_compiler supported_test_suite_filename_patterns =", "ignored_parsed_targets = self.parse_targets( set(ignored_targets or []), default_test_suite_glob ) test_case_globs_dict = self.build_test_case_globs_dict(parsed_targets) ignored_test_case_globs_dict =", "in filepaths: path = Path(filepath) if TestCollector.is_test_suite(path.name): results.add(path) return results def _build_test_suites_from_test_suite_info_dict( self,", "import time from typing import Dict, List, Optional, Set from starkware.cairo.lang.compiler.preprocessor.preprocessor_error import (", "TestSuiteInfoDict = Dict[TestSuitePath, TestSuiteInfo] class TestCollectingException(ProtostarException): pass @dataclass class TestCollector: class Result: def", "self, preprocessed: StarknetPreprocessedProgram ) -> Optional[str]: function_names = self._starknet_compiler.get_function_names( preprocessed, predicate=lambda fn_name: fn_name", "= 0.0) -> None: self.test_suites = test_suites self.test_cases_count = sum( [len(test_suite.test_case_names) for test_suite", "# pylint: disable=no-self-use import re from collections import defaultdict from dataclasses import dataclass", "result.append(f\"{suites_count} suites,\") result.append(\"and\") if self.test_cases_count == 1: result.append(\"1 test case\") else: result.append(f\"{self.test_cases_count} test", "found\") def __init__( self, starknet_compiler: StarknetCompiler, ) -> None: self._starknet_compiler = starknet_compiler supported_test_suite_filename_patterns", "return result TestSuiteInfoDict = Dict[TestSuitePath, TestSuiteInfo] class TestCollectingException(ProtostarException): pass @dataclass class TestCollector: class", "TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict, ) -> TestSuiteInfoDict: result: TestSuiteInfoDict = {} for test_suite_path in", "in 
self.ignored_test_case_globs: if fnmatch(test_case_name, ignored_test_case_glob): result.remove(test_case_name) break return result TestSuiteInfoDict = Dict[TestSuitePath, TestSuiteInfo]", "-> TestCaseGlobsDict: result = test_case_globs_dict.copy() for ignored_target_path in ignored_test_case_globs_dict: if ( \"*\" in", "self._find_matching_any_test_case_glob(test_case_names) result = self._filter_out_matching_any_ignored_test_case_glob(matches) return list(result) def _find_matching_any_test_case_glob(self, test_case_names: List[str]) -> Set[str]: result:", "filtered_test_case_globs_dict, ignored_test_case_globs_dict, ) test_suites = self._build_test_suites_from_test_suite_info_dict( test_suite_info_dict ) non_empty_test_suites = list( filter(lambda test_file:", "parsed_targets: Set[ParsedTarget], ) -> TestCaseGlobsDict: results: TestCaseGlobsDict = defaultdict(set) for parsed_target in parsed_targets:", "List, Optional, Set from starkware.cairo.lang.compiler.preprocessor.preprocessor_error import ( PreprocessorError, ) from starkware.starknet.compiler.starknet_preprocessor import (", "= self.filter_out_ignored_test_suites( test_case_globs_dict, ignored_test_case_globs_dict, ) test_suite_info_dict = self.build_test_suite_info_dict( filtered_test_case_globs_dict, ignored_test_case_globs_dict, ) test_suites =", "self.ignored_test_case_globs: if fnmatch(test_case_name, ignored_test_case_glob): result.remove(test_case_name) break return result TestSuiteInfoDict = Dict[TestSuitePath, TestSuiteInfo] class", "any( test_re.match(filename) for test_re in cls.supported_test_suite_filename_patterns ) def collect( self, targets: List[Target], ignored_targets:", "for test_case_name in test_case_names: for ignored_test_case_glob in self.ignored_test_case_globs: if fnmatch(test_case_name, ignored_test_case_glob): result.remove(test_case_name) break", "logger: Logger): if self.test_cases_count: result: List[str] = [\"Collected\"] suites_count = len(self.test_suites) if 
suites_count", "time import time from typing import Dict, List, Optional, Set from starkware.cairo.lang.compiler.preprocessor.preprocessor_error import", "Logger): if self.test_cases_count: result: List[str] = [\"Collected\"] suites_count = len(self.test_suites) if suites_count ==", "self.test_case_globs: if fnmatch(test_case_name, test_case_glob): result.add(test_case_name) return result def _filter_out_matching_any_ignored_test_case_glob( self, test_case_names: Set[str] )", "default_test_suite_glob) for target in targets } def filter_out_ignored_test_suites( self, test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict,", "= target.split(\"::\") test_suite_glob = test_suite_glob or default_test_suite_glob or \".\" if not test_case_glob: test_case_glob", "List[TestSuite]: return [ self._build_test_suite_from_test_suite_info( test_suite_info, ) for test_suite_info in test_suite_info_dict.values() ] def _build_test_suite_from_test_suite_info(", "default_test_suite_glob) ignored_parsed_targets = self.parse_targets( set(ignored_targets or []), default_test_suite_glob ) test_case_globs_dict = self.build_test_case_globs_dict(parsed_targets) ignored_test_case_globs_dict", "fn_name: fn_name == \"__setup__\" ) return function_names[0] if len(function_names) > 0 else None", "= ignored_test_case_globs_dict[ test_suite_path ] return result def _find_test_suite_paths_from_glob( self, test_suite_glob: str ) ->", "] @classmethod def is_test_suite(cls, filename: str) -> bool: return any( test_re.match(filename) for test_re", "in test_suite_paths: results[test_suite_path].add(parsed_target.test_case_glob) return results def parse_targets( self, targets: Set[Target], default_test_suite_glob: Optional[str] =", "results def _build_test_suites_from_test_suite_info_dict( self, test_suite_info_dict: TestSuiteInfoDict, ) -> List[TestSuite]: return [ self._build_test_suite_from_test_suite_info( test_suite_info,", "pass @dataclass class TestCollector: class 
Result: def __init__(self, test_suites: List[TestSuite], duration: float =", "case\") else: result.append(f\"{self.test_cases_count} test cases\") result.append(f\"({self.duration:.3f} s)\") logger.info(\" \".join(result)) else: logger.warning(\"No cases found\")", "collections import defaultdict from dataclasses import dataclass from fnmatch import fnmatch from glob", "in result ): del result[ignored_target_path] return result def build_test_suite_info_dict( self, test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict:", ") -> \"TestCollector.Result\": start_time = time() parsed_targets = self.parse_targets(set(targets), default_test_suite_glob) ignored_parsed_targets = self.parse_targets(", "collection during loop execution for test_case_name in test_case_names: for ignored_test_case_glob in self.ignored_test_case_globs: if", "test_suites = self._build_test_suites_from_test_suite_info_dict( test_suite_info_dict ) non_empty_test_suites = list( filter(lambda test_file: (test_file.test_case_names) != [],", "1: result.append(\"1 suite,\") else: result.append(f\"{suites_count} suites,\") result.append(\"and\") if self.test_cases_count == 1: result.append(\"1 test", "from glob import glob from logging import Logger from pathlib import Path from", "StarknetCompiler, ) -> None: self._starknet_compiler = starknet_compiler supported_test_suite_filename_patterns = [ re.compile(r\"^test_.*\\.cairo\"), re.compile(r\"^.*_test.cairo\"), ]", "Path TestCaseGlob = str Target = str \"\"\"e.g. 
`tests/**/::test_*`\"\"\" TestCaseGlobsDict = Dict[TestSuitePath, Set[TestCaseGlob]]", "= sum( [len(test_suite.test_case_names) for test_suite in test_suites] ) self.duration = duration def log(self,", "ignored_target_path in result ): del result[ignored_target_path] return result def build_test_suite_info_dict( self, test_case_globs_dict: TestCaseGlobsDict,", "cases found\") def __init__( self, starknet_compiler: StarknetCompiler, ) -> None: self._starknet_compiler = starknet_compiler", "self._build_test_suite_from_test_suite_info( test_suite_info, ) for test_suite_info in test_suite_info_dict.values() ] def _build_test_suite_from_test_suite_info( self, test_suite_info: TestSuiteInfo,", "\"*\" return cls(test_suite_glob, test_case_glob) @dataclass class TestSuiteInfo: path: Path test_case_globs: Set[TestCaseGlob] ignored_test_case_globs: Set[TestCaseGlob]", "test_case_glob: TestCaseGlob @classmethod def from_target( cls, target: Target, default_test_suite_glob: Optional[TestSuiteGlob] ): test_suite_glob: Optional[TestSuiteGlob]", "lengths of this collection during loop execution for test_case_name in test_case_names: for ignored_test_case_glob", "cases\") result.append(f\"({self.duration:.3f} s)\") logger.info(\" \".join(result)) else: logger.warning(\"No cases found\") def __init__( self, starknet_compiler:", "test_case_globs_dict[test_suite_path] if test_suite_path in ignored_test_case_globs_dict: test_suite_info.ignored_test_case_globs = ignored_test_case_globs_dict[ test_suite_path ] return result def", "-> List[TestSuite]: return [ self._build_test_suite_from_test_suite_info( test_suite_info, ) for test_suite_info in test_suite_info_dict.values() ] def", "end_time = time() return TestCollector.Result( test_suites=non_empty_test_suites, duration=end_time - start_time ) def build_test_case_globs_dict( self,", ") -> TestSuiteInfoDict: result: TestSuiteInfoDict = {} for test_suite_path in test_case_globs_dict: test_suite_info =", "if path.is_dir(): 
results.update(self._find_test_suite_paths_in_dir(path)) elif path.is_file() and TestCollector.is_test_suite(path.name): results.add(path) return results def _find_test_suite_paths_in_dir(self, path:", "filter(lambda test_file: (test_file.test_case_names) != [], test_suites) ) end_time = time() return TestCollector.Result( test_suites=non_empty_test_suites,", "ignored_targets: Optional[List[Target]] = None, default_test_suite_glob: Optional[str] = None, ) -> \"TestCollector.Result\": start_time =", "test_path=test_suite_info.path, test_case_names=matching_test_case_names, preprocessed_contract=preprocessed, setup_fn_name=self._find_setup_hook_name(preprocessed), ) def _collect_test_case_names( self, preprocessed: StarknetPreprocessedProgram ) -> List[str]:", "test_case_globs_dict, ignored_test_case_globs_dict, ) test_suite_info_dict = self.build_test_suite_info_dict( filtered_test_case_globs_dict, ignored_test_case_globs_dict, ) test_suites = self._build_test_suites_from_test_suite_info_dict( test_suite_info_dict", "filepaths: path = Path(filepath) if TestCollector.is_test_suite(path.name): results.add(path) return results def _build_test_suites_from_test_suite_info_dict( self, test_suite_info_dict:", "str TestSuitePath = Path TestCaseGlob = str Target = str \"\"\"e.g. 
`tests/**/::test_*`\"\"\" TestCaseGlobsDict", "in test_suites] ) self.duration = duration def log(self, logger: Logger): if self.test_cases_count: result:", "protostar.utils.starknet_compilation import StarknetCompiler TestSuiteGlob = str TestSuitePath = Path TestCaseGlob = str Target", "else: result.append(f\"{self.test_cases_count} test cases\") result.append(f\"({self.duration:.3f} s)\") logger.info(\" \".join(result)) else: logger.warning(\"No cases found\") def", "cls, target: Target, default_test_suite_glob: Optional[TestSuiteGlob] ): test_suite_glob: Optional[TestSuiteGlob] = target test_case_glob: Optional[TestCaseGlob] =", "import ProtostarException from protostar.utils.starknet_compilation import StarknetCompiler TestSuiteGlob = str TestSuitePath = Path TestCaseGlob", "TestCollectingException(ProtostarException): pass @dataclass class TestCollector: class Result: def __init__(self, test_suites: List[TestSuite], duration: float", "TestSuiteInfoDict, ) -> List[TestSuite]: return [ self._build_test_suite_from_test_suite_info( test_suite_info, ) for test_suite_info in test_suite_info_dict.values()", "for match in matches: path = Path(match) if path.is_dir(): results.update(self._find_test_suite_paths_in_dir(path)) elif path.is_file() and", "\"TestCollector.Result\": start_time = time() parsed_targets = self.parse_targets(set(targets), default_test_suite_glob) ignored_parsed_targets = self.parse_targets( set(ignored_targets or", "= self.build_test_suite_info_dict( filtered_test_case_globs_dict, ignored_test_case_globs_dict, ) test_suites = self._build_test_suites_from_test_suite_info_dict( test_suite_info_dict ) non_empty_test_suites = list(", "[ re.compile(r\"^test_.*\\.cairo\"), re.compile(r\"^.*_test.cairo\"), ] @classmethod def is_test_suite(cls, filename: str) -> bool: return any(", "test_suites: List[TestSuite], duration: float = 0.0) -> None: self.test_suites = test_suites self.test_cases_count =", "preprocessed: StarknetPreprocessedProgram ) -> 
Optional[str]: function_names = self._starknet_compiler.get_function_names( preprocessed, predicate=lambda fn_name: fn_name ==", "for test_suite_path in test_suite_paths: results[test_suite_path].add(parsed_target.test_case_glob) return results def parse_targets( self, targets: Set[Target], default_test_suite_glob:", "sum( [len(test_suite.test_case_names) for test_suite in test_suites] ) self.duration = duration def log(self, logger:", "[ self._build_test_suite_from_test_suite_info( test_suite_info, ) for test_suite_info in test_suite_info_dict.values() ] def _build_test_suite_from_test_suite_info( self, test_suite_info:", "logging import Logger from pathlib import Path from time import time from typing", "test_suite_info_dict: TestSuiteInfoDict, ) -> List[TestSuite]: return [ self._build_test_suite_from_test_suite_info( test_suite_info, ) for test_suite_info in", "or default_test_suite_glob or \".\" if not test_case_glob: test_case_glob = \"*\" return cls(test_suite_glob, test_case_glob)", "filepath in filepaths: path = Path(filepath) if TestCollector.is_test_suite(path.name): results.add(path) return results def _build_test_suites_from_test_suite_info_dict(", "-> StarknetPreprocessedProgram: try: return self._starknet_compiler.preprocess_contract(file_path) except PreprocessorError as p_err: print(p_err) raise TestCollectingException(\"Failed to", "dataclasses import dataclass from fnmatch import fnmatch from glob import glob from logging", "Set from starkware.cairo.lang.compiler.preprocessor.preprocessor_error import ( PreprocessorError, ) from starkware.starknet.compiler.starknet_preprocessor import ( StarknetPreprocessedProgram, )", "= self.build_test_case_globs_dict( ignored_parsed_targets ) filtered_test_case_globs_dict = self.filter_out_ignored_test_suites( test_case_globs_dict, ignored_test_case_globs_dict, ) test_suite_info_dict = self.build_test_suite_info_dict(", "default_test_suite_glob: Optional[str] = None ) -> Set[ParsedTarget]: return { 
ParsedTarget.from_target(target, default_test_suite_glob) for target", "ignored_test_case_globs_dict: TestCaseGlobsDict, ) -> TestCaseGlobsDict: result = test_case_globs_dict.copy() for ignored_target_path in ignored_test_case_globs_dict: if", "from protostar.protostar_exception import ProtostarException from protostar.utils.starknet_compilation import StarknetCompiler TestSuiteGlob = str TestSuitePath =", "Target = str \"\"\"e.g. `tests/**/::test_*`\"\"\" TestCaseGlobsDict = Dict[TestSuitePath, Set[TestCaseGlob]] @dataclass(frozen=True) class ParsedTarget: test_suite_glob:", "test_re in cls.supported_test_suite_filename_patterns ) def collect( self, targets: List[Target], ignored_targets: Optional[List[Target]] = None,", "test_re.match(filename) for test_re in cls.supported_test_suite_filename_patterns ) def collect( self, targets: List[Target], ignored_targets: Optional[List[Target]]", "TestSuiteGlob = str TestSuitePath = Path TestCaseGlob = str Target = str \"\"\"e.g.", "cls(test_suite_glob, test_case_glob) @dataclass class TestSuiteInfo: path: Path test_case_globs: Set[TestCaseGlob] ignored_test_case_globs: Set[TestCaseGlob] def match_test_case_names(self,", "fnmatch import fnmatch from glob import glob from logging import Logger from pathlib", "result = test_case_globs_dict.copy() for ignored_target_path in ignored_test_case_globs_dict: if ( \"*\" in ignored_test_case_globs_dict[ignored_target_path] and", "= list( filter(lambda test_file: (test_file.test_case_names) != [], test_suites) ) end_time = time() return", "str \"\"\"e.g. 
`tests/**/::test_*`\"\"\" TestCaseGlobsDict = Dict[TestSuitePath, Set[TestCaseGlob]] @dataclass(frozen=True) class ParsedTarget: test_suite_glob: TestSuiteGlob test_case_glob:", "Dict, List, Optional, Set from starkware.cairo.lang.compiler.preprocessor.preprocessor_error import ( PreprocessorError, ) from starkware.starknet.compiler.starknet_preprocessor import", "Set[Path] = set() matches = glob(test_suite_glob, recursive=True) for match in matches: path =", "} def filter_out_ignored_test_suites( self, test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict, ) -> TestCaseGlobsDict: result =", "ignored_test_case_globs_dict = self.build_test_case_globs_dict( ignored_parsed_targets ) filtered_test_case_globs_dict = self.filter_out_ignored_test_suites( test_case_globs_dict, ignored_test_case_globs_dict, ) test_suite_info_dict =", "test_case_globs_dict.copy() for ignored_target_path in ignored_test_case_globs_dict: if ( \"*\" in ignored_test_case_globs_dict[ignored_target_path] and ignored_target_path in", "parsed_target in parsed_targets: test_suite_paths = self._find_test_suite_paths_from_glob( parsed_target.test_suite_glob ) for test_suite_path in test_suite_paths: results[test_suite_path].add(parsed_target.test_case_glob)", "def parse_targets( self, targets: Set[Target], default_test_suite_glob: Optional[str] = None ) -> Set[ParsedTarget]: return", "def filter_out_ignored_test_suites( self, test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict, ) -> TestCaseGlobsDict: result = test_case_globs_dict.copy()", "class TestSuiteInfo: path: Path test_case_globs: Set[TestCaseGlob] ignored_test_case_globs: Set[TestCaseGlob] def match_test_case_names(self, test_case_names: List[str]) ->", "== \"__setup__\" ) return function_names[0] if len(function_names) > 0 else None def _preprocess_contract(self,", "= self._filter_out_matching_any_ignored_test_case_glob(matches) return list(result) def 
_find_matching_any_test_case_glob(self, test_case_names: List[str]) -> Set[str]: result: Set[str] =", "duration=end_time - start_time ) def build_test_case_globs_dict( self, parsed_targets: Set[ParsedTarget], ) -> TestCaseGlobsDict: results:", "test_case_names: Set[str] ) -> Set[str]: result = ( test_case_names.copy() ) # copy prevents", ") -> Set[TestSuitePath]: results: Set[Path] = set() matches = glob(test_suite_glob, recursive=True) for match", "set(ignored_targets or []), default_test_suite_glob ) test_case_globs_dict = self.build_test_case_globs_dict(parsed_targets) ignored_test_case_globs_dict = self.build_test_case_globs_dict( ignored_parsed_targets )", "return function_names[0] if len(function_names) > 0 else None def _preprocess_contract(self, file_path: Path) ->", "result TestSuiteInfoDict = Dict[TestSuitePath, TestSuiteInfo] class TestCollectingException(ProtostarException): pass @dataclass class TestCollector: class Result:", "self.parse_targets(set(targets), default_test_suite_glob) ignored_parsed_targets = self.parse_targets( set(ignored_targets or []), default_test_suite_glob ) test_case_globs_dict = self.build_test_case_globs_dict(parsed_targets)", "self, starknet_compiler: StarknetCompiler, ) -> None: self._starknet_compiler = starknet_compiler supported_test_suite_filename_patterns = [ re.compile(r\"^test_.*\\.cairo\"),", "self, test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict, ) -> TestSuiteInfoDict: result: TestSuiteInfoDict = {} for", ") # copy prevents changing lengths of this collection during loop execution for", "None ) -> Set[ParsedTarget]: return { ParsedTarget.from_target(target, default_test_suite_glob) for target in targets }", "self, test_suite_glob: str ) -> Set[TestSuitePath]: results: Set[Path] = set() matches = glob(test_suite_glob,", "for test_case_glob in self.test_case_globs: if fnmatch(test_case_name, test_case_glob): result.add(test_case_name) return result def 
_filter_out_matching_any_ignored_test_case_glob( self,", "_find_matching_any_test_case_glob(self, test_case_names: List[str]) -> Set[str]: result: Set[str] = set() for test_case_name in test_case_names:", "ignored_test_case_globs_dict: test_suite_info.ignored_test_case_globs = ignored_test_case_globs_dict[ test_suite_path ] return result def _find_test_suite_paths_from_glob( self, test_suite_glob: str", "= set(glob(f\"{path}/**/*.cairo\", recursive=True)) results: Set[Path] = set() for filepath in filepaths: path =", ") -> Set[ParsedTarget]: return { ParsedTarget.from_target(target, default_test_suite_glob) for target in targets } def", "self.filter_out_ignored_test_suites( test_case_globs_dict, ignored_test_case_globs_dict, ) test_suite_info_dict = self.build_test_suite_info_dict( filtered_test_case_globs_dict, ignored_test_case_globs_dict, ) test_suites = self._build_test_suites_from_test_suite_info_dict(", "@classmethod def is_test_suite(cls, filename: str) -> bool: return any( test_re.match(filename) for test_re in", "Optional[TestSuiteGlob] = target test_case_glob: Optional[TestCaseGlob] = None if \"::\" in target: (test_suite_glob, test_case_glob)", "TestSuiteInfo, ) -> TestSuite: preprocessed = self._preprocess_contract(test_suite_info.path) collected_test_case_names = self._collect_test_case_names(preprocessed) matching_test_case_names = test_suite_info.match_test_case_names(", "self._starknet_compiler = starknet_compiler supported_test_suite_filename_patterns = [ re.compile(r\"^test_.*\\.cairo\"), re.compile(r\"^.*_test.cairo\"), ] @classmethod def is_test_suite(cls, filename:", "path = Path(filepath) if TestCollector.is_test_suite(path.name): results.add(path) return results def _build_test_suites_from_test_suite_info_dict( self, test_suite_info_dict: TestSuiteInfoDict,", "def _collect_test_case_names( self, preprocessed: StarknetPreprocessedProgram ) -> List[str]: return self._starknet_compiler.get_function_names( preprocessed, predicate=lambda 
fn_name:", "matches = self._find_matching_any_test_case_glob(test_case_names) result = self._filter_out_matching_any_ignored_test_case_glob(matches) return list(result) def _find_matching_any_test_case_glob(self, test_case_names: List[str]) ->", "for ignored_target_path in ignored_test_case_globs_dict: if ( \"*\" in ignored_test_case_globs_dict[ignored_target_path] and ignored_target_path in result", "self._preprocess_contract(test_suite_info.path) collected_test_case_names = self._collect_test_case_names(preprocessed) matching_test_case_names = test_suite_info.match_test_case_names( collected_test_case_names ) return TestSuite( test_path=test_suite_info.path, test_case_names=matching_test_case_names,", "file_path: Path) -> StarknetPreprocessedProgram: try: return self._starknet_compiler.preprocess_contract(file_path) except PreprocessorError as p_err: print(p_err) raise", "list( filter(lambda test_file: (test_file.test_case_names) != [], test_suites) ) end_time = time() return TestCollector.Result(", "List[Target], ignored_targets: Optional[List[Target]] = None, default_test_suite_glob: Optional[str] = None, ) -> \"TestCollector.Result\": start_time", "`tests/**/::test_*`\"\"\" TestCaseGlobsDict = Dict[TestSuitePath, Set[TestCaseGlob]] @dataclass(frozen=True) class ParsedTarget: test_suite_glob: TestSuiteGlob test_case_glob: TestCaseGlob @classmethod", "-> \"TestCollector.Result\": start_time = time() parsed_targets = self.parse_targets(set(targets), default_test_suite_glob) ignored_parsed_targets = self.parse_targets( set(ignored_targets", "elif path.is_file() and TestCollector.is_test_suite(path.name): results.add(path) return results def _find_test_suite_paths_in_dir(self, path: Path) -> Set[TestSuitePath]:", "StarknetPreprocessedProgram ) -> List[str]: return self._starknet_compiler.get_function_names( preprocessed, predicate=lambda fn_name: fn_name.startswith(\"test_\") ) def _find_setup_hook_name(", "test_case_glob): result.add(test_case_name) return result 
def _filter_out_matching_any_ignored_test_case_glob( self, test_case_names: Set[str] ) -> Set[str]: result", "return result def _filter_out_matching_any_ignored_test_case_glob( self, test_case_names: Set[str] ) -> Set[str]: result = (", "\"::\" in target: (test_suite_glob, test_case_glob) = target.split(\"::\") test_suite_glob = test_suite_glob or default_test_suite_glob or", "in ignored_test_case_globs_dict: if ( \"*\" in ignored_test_case_globs_dict[ignored_target_path] and ignored_target_path in result ): del", "StarknetPreprocessedProgram: try: return self._starknet_compiler.preprocess_contract(file_path) except PreprocessorError as p_err: print(p_err) raise TestCollectingException(\"Failed to collect", "= test_suites self.test_cases_count = sum( [len(test_suite.test_case_names) for test_suite in test_suites] ) self.duration =", "): del result[ignored_target_path] return result def build_test_suite_info_dict( self, test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict, )", "= self._starknet_compiler.get_function_names( preprocessed, predicate=lambda fn_name: fn_name == \"__setup__\" ) return function_names[0] if len(function_names)", "test_case_globs=set(), ignored_test_case_globs=set(), path=test_suite_path, ), ) test_suite_info.test_case_globs = test_case_globs_dict[test_suite_path] if test_suite_path in ignored_test_case_globs_dict: test_suite_info.ignored_test_case_globs", "and ignored_target_path in result ): del result[ignored_target_path] return result def build_test_suite_info_dict( self, test_case_globs_dict:", "TestCaseGlobsDict, ) -> TestCaseGlobsDict: result = test_case_globs_dict.copy() for ignored_target_path in ignored_test_case_globs_dict: if (", "test_case_globs: Set[TestCaseGlob] ignored_test_case_globs: Set[TestCaseGlob] def match_test_case_names(self, test_case_names: List[str]) -> List[str]: matches = self._find_matching_any_test_case_glob(test_case_names)", "test_suite_info, ) for test_suite_info in 
test_suite_info_dict.values() ] def _build_test_suite_from_test_suite_info( self, test_suite_info: TestSuiteInfo, )", "= {} for test_suite_path in test_case_globs_dict: test_suite_info = result.setdefault( test_suite_path, TestSuiteInfo( test_case_globs=set(), ignored_test_case_globs=set(),", "pylint: disable=no-self-use import re from collections import defaultdict from dataclasses import dataclass from", "list(result) def _find_matching_any_test_case_glob(self, test_case_names: List[str]) -> Set[str]: result: Set[str] = set() for test_case_name", "TestSuite from protostar.protostar_exception import ProtostarException from protostar.utils.starknet_compilation import StarknetCompiler TestSuiteGlob = str TestSuitePath", "in test_case_names: for test_case_glob in self.test_case_globs: if fnmatch(test_case_name, test_case_glob): result.add(test_case_name) return result def", "result = self._filter_out_matching_any_ignored_test_case_glob(matches) return list(result) def _find_matching_any_test_case_glob(self, test_case_names: List[str]) -> Set[str]: result: Set[str]", "start_time = time() parsed_targets = self.parse_targets(set(targets), default_test_suite_glob) ignored_parsed_targets = self.parse_targets( set(ignored_targets or []),", "_build_test_suites_from_test_suite_info_dict( self, test_suite_info_dict: TestSuiteInfoDict, ) -> List[TestSuite]: return [ self._build_test_suite_from_test_suite_info( test_suite_info, ) for", "@dataclass class TestCollector: class Result: def __init__(self, test_suites: List[TestSuite], duration: float = 0.0)", "default_test_suite_glob or \".\" if not test_case_glob: test_case_glob = \"*\" return cls(test_suite_glob, test_case_glob) @dataclass", "-> Set[str]: result = ( test_case_names.copy() ) # copy prevents changing lengths of", "glob(test_suite_glob, recursive=True) for match in matches: path = Path(match) if path.is_dir(): results.update(self._find_test_suite_paths_in_dir(path)) elif", "of this collection during loop execution 
for test_case_name in test_case_names: for ignored_test_case_glob in", "def build_test_suite_info_dict( self, test_case_globs_dict: TestCaseGlobsDict, ignored_test_case_globs_dict: TestCaseGlobsDict, ) -> TestSuiteInfoDict: result: TestSuiteInfoDict =", "Dict[TestSuitePath, Set[TestCaseGlob]] @dataclass(frozen=True) class ParsedTarget: test_suite_glob: TestSuiteGlob test_case_glob: TestCaseGlob @classmethod def from_target( cls,", "import dataclass from fnmatch import fnmatch from glob import glob from logging import", "test_suite_path, TestSuiteInfo( test_case_globs=set(), ignored_test_case_globs=set(), path=test_suite_path, ), ) test_suite_info.test_case_globs = test_case_globs_dict[test_suite_path] if test_suite_path in", "TestSuiteInfoDict = {} for test_suite_path in test_case_globs_dict: test_suite_info = result.setdefault( test_suite_path, TestSuiteInfo( test_case_globs=set(),", "test_suite_info.test_case_globs = test_case_globs_dict[test_suite_path] if test_suite_path in ignored_test_case_globs_dict: test_suite_info.ignored_test_case_globs = ignored_test_case_globs_dict[ test_suite_path ] return", "else None def _preprocess_contract(self, file_path: Path) -> StarknetPreprocessedProgram: try: return self._starknet_compiler.preprocess_contract(file_path) except PreprocessorError", "test_case_names: for ignored_test_case_glob in self.ignored_test_case_globs: if fnmatch(test_case_name, ignored_test_case_glob): result.remove(test_case_name) break return result TestSuiteInfoDict", "= None, default_test_suite_glob: Optional[str] = None, ) -> \"TestCollector.Result\": start_time = time() parsed_targets" ]
[ "message_code.Response.success: self.__stored_tx.put(stored_tx_item) raise Exception(result_add_tx.message) def __handler_status(self, request, context): \"\"\"Service Status \"\"\" status =", "__create_tx_continue(self): # 저장된 작업이 있으면 전송한다. while not self.__stored_tx.empty(): stored_tx_item = self.__stored_tx.get() result_add_tx", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "= { message_code.Request.status: self.__handler_status, message_code.Request.stop: self.__handler_stop, message_code.Request.tx_create: self.__handler_create_tx, message_code.Request.tx_connect_to_leader: self.__handler_connect_to_leader, message_code.Request.tx_connect_to_inner_peer: self.__handler_connect_to_inner_peer }", "= json.dumps(status) logging.debug(\"TxService __handler_status %s : %s\", request.message, status_json) return loopchain_pb2.Message(code=message_code.Response.success, meta=status_json) def", "# ) return loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_leader(self, request, context): logging.debug(f\"TxService handler connect to leader({request.message})\")", "self.__stored_tx.empty(): stored_tx_item = self.__stored_tx.get() result_add_tx = self.__stub_to_leader.call_in_times( \"AddTx\", loopchain_pb2.TxSend(tx=stored_tx_item), is_stub_reuse=True) if result_add_tx is", ") if result_add_tx.response_code != message_code.Response.success: raise Exception(result_add_tx.message) except Exception as e: logging.warning(f\"in tx", "__handler_connect_to_inner_peer(self, request, context): logging.debug(f\"TxService handler connect to inner peer({request.message})\") inner_peer_target = request.message #", "오류를 발생시킬 수 있다. # 안전한 연결을 위하여 부모 프로세스와도 gRPC stub 을", "License. \"\"\"Send tx to leader. Store tx temporary while leader is broken\"\"\" import", "this file except in compliance with the License. 
# You may obtain a", "에 접속하기 위한 stub 을 만든다. # pipe 를 통한 return 은 pipe", "loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_inner_peer(self, request, context): logging.debug(f\"TxService handler connect to inner peer({request.message})\") inner_peer_target =", "StubManager.get_stub_manager_to_server( leader_target, loopchain_pb2_grpc.PeerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True ) self.__peer_status = PeerProcessStatus.normal # TODO block generator", "# logging.debug(f\"TxService got tx({tx_object.get_tx_hash()})\") try: if self.__peer_status == PeerProcessStatus.leader_complained: self.__stored_tx.put(tx) logging.warning(\"Leader is complained", "} self.__peer_id = None self.__stub_to_peer_service = None # ObjectManager().tx_service = self self.__stub_to_leader =", "def __handler_connect_to_inner_peer(self, request, context): logging.debug(f\"TxService handler connect to inner peer({request.message})\") inner_peer_target = request.message", "raise Exception(result_add_tx.message) def __handler_status(self, request, context): \"\"\"Service Status \"\"\" status = dict() status['status']", "handler stop...\") self.stop() return loopchain_pb2.Message(code=message_code.Response.success) def __handler_create_tx(self, request, context): # logging.debug(\"TxService handler create", "if result_add_tx.response_code != message_code.Response.success: raise Exception(result_add_tx.message) except Exception as e: logging.warning(f\"in tx service", "ANY KIND, either express or implied. # See the License for the specific", "return loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_inner_peer(self, request, context): logging.debug(f\"TxService handler connect to inner peer({request.message})\") inner_peer_target", "을 만든다. # pipe 를 통한 return 은 pipe send 와 쌍이 맞지", "# limitations under the License. \"\"\"Send tx to leader. Store tx temporary while", "있다. 
# 안전한 연결을 위하여 부모 프로세스와도 gRPC stub 을 이용하여 통신한다. self.__stub_to_inner_peer", "\"\"\"Send tx to leader. Store tx temporary while leader is broken\"\"\" import logging", "create tx continue() Exception: \" + str(e)) self.__peer_status = PeerProcessStatus.leader_complained return loopchain_pb2.Message(code=message_code.Response.fail_add_tx_to_leader) return", "와 쌍이 맞지 않은 경우 오류를 발생시킬 수 있다. # 안전한 연결을 위하여", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "stub 을 이용하여 통신한다. self.__stub_to_inner_peer = StubManager.get_stub_manager_to_server( inner_peer_target, loopchain_pb2_grpc.InnerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True ) logging.debug(\"try", "logging.debug(\"TxService handler stop...\") self.stop() return loopchain_pb2.Message(code=message_code.Response.success) def __handler_create_tx(self, request, context): # logging.debug(\"TxService handler", "status = dict() status['status'] = message_code.Response.success status_json = json.dumps(status) logging.debug(\"TxService __handler_status %s :", "result_add_tx.response_code != message_code.Response.success: raise Exception(result_add_tx.message) except Exception as e: logging.warning(f\"in tx service create_tx", "\" + str(self.__stored_tx.qsize())) else: self.__create_tx_continue() result_add_tx = self.__stub_to_leader.call( \"AddTx\", loopchain_pb2.TxSend(tx=tx), is_stub_reuse=True ) if", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "# pipe 를 통한 return 은 pipe send 와 쌍이 맞지 않은 경우", "self.__handler_status, message_code.Request.stop: self.__handler_stop, message_code.Request.tx_create: self.__handler_create_tx, message_code.Request.tx_connect_to_leader: self.__handler_connect_to_leader, message_code.Request.tx_connect_to_inner_peer: self.__handler_connect_to_inner_peer } self.__peer_id = None", "PeerProcessStatus.leader_complained return loopchain_pb2.Message(code=message_code.Response.fail_add_tx_to_leader) 
return loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_inner_peer(self, request, context): logging.debug(f\"TxService handler connect to", "logging.warning(\"in tx service create tx continue() Exception: \" + str(e)) self.__peer_status = PeerProcessStatus.leader_complained", "leader_target = request.message self.__stub_to_leader = StubManager.get_stub_manager_to_server( leader_target, loopchain_pb2_grpc.PeerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True ) self.__peer_status =", "OF ANY KIND, either express or implied. # See the License for the", "import pickle import queue from enum import Enum from loopchain.baseservice import ObjectManager, StubManager", "pickle.loads(tx) # logging.debug(f\"TxService got tx({tx_object.get_tx_hash()})\") try: if self.__peer_status == PeerProcessStatus.leader_complained: self.__stored_tx.put(tx) logging.warning(\"Leader is", "tx = request.object tx_object = pickle.loads(tx) # logging.debug(f\"TxService got tx({tx_object.get_tx_hash()})\") try: if self.__peer_status", "context): logging.debug(f\"TxService handler connect to leader({request.message})\") leader_target = request.message self.__stub_to_leader = StubManager.get_stub_manager_to_server( leader_target,", "= pickle.loads(tx) # logging.debug(f\"TxService got tx({tx_object.get_tx_hash()})\") try: if self.__peer_status == PeerProcessStatus.leader_complained: self.__stored_tx.put(tx) logging.warning(\"Leader", "request, context): # logging.debug(\"TxService got request: \" + str(request)) if request.code in self.__handler_map.keys():", "self.__stub_to_inner_peer = None self.__peer_status = PeerProcessStatus.normal self.__stored_tx = queue.Queue() self.start() def __create_tx_continue(self): #", "result_add_tx = self.__stub_to_leader.call_in_times( \"AddTx\", loopchain_pb2.TxSend(tx=stored_tx_item), is_stub_reuse=True) if result_add_tx is None and result_add_tx.response_code !=", 
"loopchain_pb2.Message(code=message_code.Response.fail_add_tx_to_leader) return loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_inner_peer(self, request, context): logging.debug(f\"TxService handler connect to inner peer({request.message})\")", "ObjectManager().tx_service = self self.__stub_to_leader = None self.__stub_to_inner_peer = None self.__peer_status = PeerProcessStatus.normal self.__stored_tx", "loopchain_pb2.TxSend(tx=tx), is_stub_reuse=True ) if result_add_tx.response_code != message_code.Response.success: raise Exception(result_add_tx.message) except Exception as e:", "을 이용하여 통신한다. self.__stub_to_inner_peer = StubManager.get_stub_manager_to_server( inner_peer_target, loopchain_pb2_grpc.InnerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True ) logging.debug(\"try connect", "self.start() def __create_tx_continue(self): # 저장된 작업이 있으면 전송한다. while not self.__stored_tx.empty(): stored_tx_item =", "Leader\") # ) return loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_leader(self, request, context): logging.debug(f\"TxService handler connect to", "limitations under the License. \"\"\"Send tx to leader. Store tx temporary while leader", "request.message # 자신을 생성한 부모 Peer 에 접속하기 위한 stub 을 만든다. 
#", "Status \"\"\" status = dict() status['status'] = message_code.Response.success status_json = json.dumps(status) logging.debug(\"TxService __handler_status", "logging.warning(\"Leader is complained your tx just stored in queue by temporally: \" +", "def __handler_create_tx(self, request, context): # logging.debug(\"TxService handler create tx\") tx = request.object tx_object", "request: \" + str(request)) if request.code in self.__handler_map.keys(): return self.__handler_map[request.code](request, context) return loopchain_pb2.Message(code=message_code.Response.not_treat_message_code)", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "tx service create tx continue() Exception: \" + str(e)) self.__peer_status = PeerProcessStatus.leader_complained return", ") return loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_leader(self, request, context): logging.debug(f\"TxService handler connect to leader({request.message})\") leader_target", "# 안전한 연결을 위하여 부모 프로세스와도 gRPC stub 을 이용하여 통신한다. self.__stub_to_inner_peer =", "if self.__peer_status == PeerProcessStatus.leader_complained: self.__stored_tx.put(tx) logging.warning(\"Leader is complained your tx just stored in", "Peer 에 접속하기 위한 stub 을 만든다. 
# pipe 를 통한 return 은", "Exception: \" + str(e)) self.__peer_status = PeerProcessStatus.leader_complained return loopchain_pb2.Message(code=message_code.Response.fail_add_tx_to_leader) return loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_inner_peer(self,", "logging.debug(\"TxService got request: \" + str(request)) if request.code in self.__handler_map.keys(): return self.__handler_map[request.code](request, context)", "self.__handler_connect_to_inner_peer } self.__peer_id = None self.__stub_to_peer_service = None # ObjectManager().tx_service = self self.__stub_to_leader", "configure as conf class PeerProcessStatus(Enum): normal = 1 leader_complained = 2 class TxService(Container,", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "loopchain_pb2.Message(code=message_code.Response.success, meta=status_json) def __handler_stop(self, request, context): logging.debug(\"TxService handler stop...\") self.stop() return loopchain_pb2.Message(code=message_code.Response.success) def", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "self.__handler_map = { message_code.Request.status: self.__handler_status, message_code.Request.stop: self.__handler_stop, message_code.Request.tx_create: self.__handler_create_tx, message_code.Request.tx_connect_to_leader: self.__handler_connect_to_leader, message_code.Request.tx_connect_to_inner_peer: self.__handler_connect_to_inner_peer", "request, context): logging.debug(f\"TxService handler connect to inner peer({request.message})\") inner_peer_target = request.message # 자신을", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "self.__stored_tx.put(stored_tx_item) raise Exception(result_add_tx.message) def __handler_status(self, request, context): \"\"\"Service Status \"\"\" status = dict()", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", 
"leader({request.message})\") leader_target = request.message self.__stub_to_leader = StubManager.get_stub_manager_to_server( leader_target, loopchain_pb2_grpc.PeerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True ) self.__peer_status", "complain 방식 변경중 임시로 현재 트리거는 중단한다. # stub_to_self_peer.call_in_time( # \"NotifyLeaderBroken\", # loopchain_pb2.CommonRequest(request=\"Fail", "return loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_leader(self, request, context): logging.debug(f\"TxService handler connect to leader({request.message})\") leader_target =", "+ str(inner_peer_target)) return loopchain_pb2.Message(code=message_code.Response.success) def Request(self, request, context): # logging.debug(\"TxService got request: \"", "2017 theloop, Inc. # # Licensed under the Apache License, Version 2.0 (the", "is_stub_reuse=True ) if result_add_tx.response_code != message_code.Response.success: raise Exception(result_add_tx.message) except Exception as e: logging.warning(f\"in", "= 1 leader_complained = 2 class TxService(Container, loopchain_pb2_grpc.ContainerServicer): def __init__(self, port): Container.__init__(self, port)", "required by applicable law or agreed to in writing, software # distributed under", "queue by temporally: \" + str(self.__stored_tx.qsize())) else: self.__create_tx_continue() result_add_tx = self.__stub_to_leader.call( \"AddTx\", loopchain_pb2.TxSend(tx=tx),", "inner_peer_target = request.message # 자신을 생성한 부모 Peer 에 접속하기 위한 stub 을", "applicable law or agreed to in writing, software # distributed under the License", "= dict() status['status'] = message_code.Response.success status_json = json.dumps(status) logging.debug(\"TxService __handler_status %s : %s\",", "self.__stub_to_leader = None self.__stub_to_inner_peer = None self.__peer_status = PeerProcessStatus.normal self.__stored_tx = queue.Queue() self.start()", "연결을 위하여 부모 프로세스와도 gRPC stub 을 이용하여 통신한다. 
self.__stub_to_inner_peer = StubManager.get_stub_manager_to_server( inner_peer_target,", "from loopchain.container import Container from loopchain.protos import loopchain_pb2, loopchain_pb2_grpc, message_code from loopchain import", "loopchain_pb2_grpc.PeerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True ) self.__peer_status = PeerProcessStatus.normal # TODO block generator 연결 실패", "or agreed to in writing, software # distributed under the License is distributed", "__handler_status(self, request, context): \"\"\"Service Status \"\"\" status = dict() status['status'] = message_code.Response.success status_json", "self.__create_tx_continue() result_add_tx = self.__stub_to_leader.call( \"AddTx\", loopchain_pb2.TxSend(tx=tx), is_stub_reuse=True ) if result_add_tx.response_code != message_code.Response.success: raise", "self.__stub_to_leader.call_in_times( \"AddTx\", loopchain_pb2.TxSend(tx=stored_tx_item), is_stub_reuse=True) if result_add_tx is None and result_add_tx.response_code != message_code.Response.success: self.__stored_tx.put(stored_tx_item)", "def __handler_stop(self, request, context): logging.debug(\"TxService handler stop...\") self.stop() return loopchain_pb2.Message(code=message_code.Response.success) def __handler_create_tx(self, request,", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "tx just stored in queue by temporally: \" + str(self.__stored_tx.qsize())) else: self.__create_tx_continue() result_add_tx", "!= message_code.Response.success: raise Exception(result_add_tx.message) except Exception as e: logging.warning(f\"in tx service create_tx target({self.__stub_to_leader.target})", "in queue by temporally: \" + str(self.__stored_tx.qsize())) else: self.__create_tx_continue() result_add_tx = self.__stub_to_leader.call( \"AddTx\",", ") self.__peer_status = PeerProcessStatus.normal # TODO block generator 연결 실패 조건 확인할 것", "continue() Exception: \" + str(e)) self.__peer_status = PeerProcessStatus.leader_complained return loopchain_pb2.Message(code=message_code.Response.fail_add_tx_to_leader) return loopchain_pb2.Message(code=message_code.Response.success) def", "만든다. # pipe 를 통한 return 은 pipe send 와 쌍이 맞지 않은", "handler connect to leader({request.message})\") leader_target = request.message self.__stub_to_leader = StubManager.get_stub_manager_to_server( leader_target, loopchain_pb2_grpc.PeerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,", "tx({tx_object.get_tx_hash()})\") try: if self.__peer_status == PeerProcessStatus.leader_complained: self.__stored_tx.put(tx) logging.warning(\"Leader is complained your tx just", "맞지 않은 경우 오류를 발생시킬 수 있다. 
# 안전한 연결을 위하여 부모 프로세스와도", "loopchain import configure as conf class PeerProcessStatus(Enum): normal = 1 leader_complained = 2", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "writing, software # distributed under the License is distributed on an \"AS IS\"", "Exception: \" + str(e)) self.__stored_tx.put(tx) self.__peer_status = PeerProcessStatus.leader_complained # TODO leader complain 방식", "= StubManager.get_stub_manager_to_server( leader_target, loopchain_pb2_grpc.PeerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True ) self.__peer_status = PeerProcessStatus.normal # TODO block", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "port): Container.__init__(self, port) self.__handler_map = { message_code.Request.status: self.__handler_status, message_code.Request.stop: self.__handler_stop, message_code.Request.tx_create: self.__handler_create_tx, message_code.Request.tx_connect_to_leader:", "위하여 부모 프로세스와도 gRPC stub 을 이용하여 통신한다. self.__stub_to_inner_peer = StubManager.get_stub_manager_to_server( inner_peer_target, loopchain_pb2_grpc.InnerServiceStub,", "License. 
# You may obtain a copy of the License at # #", "message_code.Request.tx_create: self.__handler_create_tx, message_code.Request.tx_connect_to_leader: self.__handler_connect_to_leader, message_code.Request.tx_connect_to_inner_peer: self.__handler_connect_to_inner_peer } self.__peer_id = None self.__stub_to_peer_service = None", "request, context): logging.debug(f\"TxService handler connect to leader({request.message})\") leader_target = request.message self.__stub_to_leader = StubManager.get_stub_manager_to_server(", "self.__peer_id = None self.__stub_to_peer_service = None # ObjectManager().tx_service = self self.__stub_to_leader = None", "{ message_code.Request.status: self.__handler_status, message_code.Request.stop: self.__handler_stop, message_code.Request.tx_create: self.__handler_create_tx, message_code.Request.tx_connect_to_leader: self.__handler_connect_to_leader, message_code.Request.tx_connect_to_inner_peer: self.__handler_connect_to_inner_peer } self.__peer_id", "self.__create_tx_continue() except Exception as e: logging.warning(\"in tx service create tx continue() Exception: \"", "self.__peer_status = PeerProcessStatus.normal # TODO block generator 연결 실패 조건 확인할 것 if", "compliance with the License. # You may obtain a copy of the License", "tx continue() Exception: \" + str(e)) self.__peer_status = PeerProcessStatus.leader_complained return loopchain_pb2.Message(code=message_code.Response.fail_add_tx_to_leader) return loopchain_pb2.Message(code=message_code.Response.success)", "the License. \"\"\"Send tx to leader. 
Store tx temporary while leader is broken\"\"\"", "tx_object = pickle.loads(tx) # logging.debug(f\"TxService got tx({tx_object.get_tx_hash()})\") try: if self.__peer_status == PeerProcessStatus.leader_complained: self.__stored_tx.put(tx)", "# loopchain_pb2.CommonRequest(request=\"Fail Add Tx to Leader\") # ) return loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_leader(self, request,", "PeerProcessStatus(Enum): normal = 1 leader_complained = 2 class TxService(Container, loopchain_pb2_grpc.ContainerServicer): def __init__(self, port):", "# 자신을 생성한 부모 Peer 에 접속하기 위한 stub 을 만든다. # pipe", "은 pipe send 와 쌍이 맞지 않은 경우 오류를 발생시킬 수 있다. #", "message_code.Request.tx_connect_to_inner_peer: self.__handler_connect_to_inner_peer } self.__peer_id = None self.__stub_to_peer_service = None # ObjectManager().tx_service = self", "== PeerProcessStatus.leader_complained: self.__stored_tx.put(tx) logging.warning(\"Leader is complained your tx just stored in queue by", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "PeerProcessStatus.leader_complained # TODO leader complain 방식 변경중 임시로 현재 트리거는 중단한다. # stub_to_self_peer.call_in_time(", "create_tx target({self.__stub_to_leader.target}) Exception: \" + str(e)) self.__stored_tx.put(tx) self.__peer_status = PeerProcessStatus.leader_complained # TODO leader", "governing permissions and # limitations under the License. \"\"\"Send tx to leader. 
Store", "= self self.__stub_to_leader = None self.__stub_to_inner_peer = None self.__peer_status = PeerProcessStatus.normal self.__stored_tx =", "StubManager.get_stub_manager_to_server( inner_peer_target, loopchain_pb2_grpc.InnerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True ) logging.debug(\"try connect to inner peer: \" +", "self.__stub_to_leader = StubManager.get_stub_manager_to_server( leader_target, loopchain_pb2_grpc.PeerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True ) self.__peer_status = PeerProcessStatus.normal # TODO", "loopchain_pb2_grpc, message_code from loopchain import configure as conf class PeerProcessStatus(Enum): normal = 1", "Request(self, request, context): # logging.debug(\"TxService got request: \" + str(request)) if request.code in", "not use this file except in compliance with the License. # You may", "inner peer({request.message})\") inner_peer_target = request.message # 자신을 생성한 부모 Peer 에 접속하기 위한", "__handler_create_tx(self, request, context): # logging.debug(\"TxService handler create tx\") tx = request.object tx_object =", "def Request(self, request, context): # logging.debug(\"TxService got request: \" + str(request)) if request.code", "License, Version 2.0 (the \"License\"); # you may not use this file except", "is complained your tx just stored in queue by temporally: \" + str(self.__stored_tx.qsize()))", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "handler create tx\") tx = request.object tx_object = pickle.loads(tx) # logging.debug(f\"TxService got tx({tx_object.get_tx_hash()})\")", "Tx to Leader\") # ) return loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_leader(self, request, context): logging.debug(f\"TxService handler", "\"AddTx\", loopchain_pb2.TxSend(tx=stored_tx_item), is_stub_reuse=True) if result_add_tx is None and result_add_tx.response_code != message_code.Response.success: 
self.__stored_tx.put(stored_tx_item) raise", "# logging.debug(\"TxService handler create tx\") tx = request.object tx_object = pickle.loads(tx) # logging.debug(f\"TxService", "loopchain.baseservice import ObjectManager, StubManager from loopchain.container import Container from loopchain.protos import loopchain_pb2, loopchain_pb2_grpc,", "got tx({tx_object.get_tx_hash()})\") try: if self.__peer_status == PeerProcessStatus.leader_complained: self.__stored_tx.put(tx) logging.warning(\"Leader is complained your tx", "def __init__(self, port): Container.__init__(self, port) self.__handler_map = { message_code.Request.status: self.__handler_status, message_code.Request.stop: self.__handler_stop, message_code.Request.tx_create:", "안전한 연결을 위하여 부모 프로세스와도 gRPC stub 을 이용하여 통신한다. self.__stub_to_inner_peer = StubManager.get_stub_manager_to_server(", "# you may not use this file except in compliance with the License.", "Copyright 2017 theloop, Inc. # # Licensed under the Apache License, Version 2.0", "= PeerProcessStatus.leader_complained return loopchain_pb2.Message(code=message_code.Response.fail_add_tx_to_leader) return loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_inner_peer(self, request, context): logging.debug(f\"TxService handler connect", "to inner peer({request.message})\") inner_peer_target = request.message # 자신을 생성한 부모 Peer 에 접속하기", "agreed to in writing, software # distributed under the License is distributed on", "connect to inner peer({request.message})\") inner_peer_target = request.message # 자신을 생성한 부모 Peer 에", "PeerProcessStatus.normal # TODO block generator 연결 실패 조건 확인할 것 if self.__stub_to_leader is", "loopchain_pb2_grpc.InnerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True ) logging.debug(\"try connect to inner peer: \" + str(inner_peer_target)) return", "status['status'] = message_code.Response.success status_json = json.dumps(status) logging.debug(\"TxService __handler_status %s : 
%s\", request.message, status_json)", "context): \"\"\"Service Status \"\"\" status = dict() status['status'] = message_code.Response.success status_json = json.dumps(status)", "Add Tx to Leader\") # ) return loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_leader(self, request, context): logging.debug(f\"TxService", "create tx\") tx = request.object tx_object = pickle.loads(tx) # logging.debug(f\"TxService got tx({tx_object.get_tx_hash()})\") try:", "(the \"License\"); # you may not use this file except in compliance with", "leader. Store tx temporary while leader is broken\"\"\" import logging import json import", "이용하여 통신한다. self.__stub_to_inner_peer = StubManager.get_stub_manager_to_server( inner_peer_target, loopchain_pb2_grpc.InnerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True ) logging.debug(\"try connect to", "self.__peer_status = PeerProcessStatus.leader_complained return loopchain_pb2.Message(code=message_code.Response.fail_add_tx_to_leader) return loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_inner_peer(self, request, context): logging.debug(f\"TxService handler", "self.__stored_tx = queue.Queue() self.start() def __create_tx_continue(self): # 저장된 작업이 있으면 전송한다. 
while not", "# ObjectManager().tx_service = self self.__stub_to_leader = None self.__stub_to_inner_peer = None self.__peer_status = PeerProcessStatus.normal", "logging.warning(f\"in tx service create_tx target({self.__stub_to_leader.target}) Exception: \" + str(e)) self.__stored_tx.put(tx) self.__peer_status = PeerProcessStatus.leader_complained", "\"\"\" status = dict() status['status'] = message_code.Response.success status_json = json.dumps(status) logging.debug(\"TxService __handler_status %s", "block generator 연결 실패 조건 확인할 것 if self.__stub_to_leader is None: return loopchain_pb2.Message(code=message_code.Response.fail_connect_to_leader)", "# Unless required by applicable law or agreed to in writing, software #", "message_code.Response.success status_json = json.dumps(status) logging.debug(\"TxService __handler_status %s : %s\", request.message, status_json) return loopchain_pb2.Message(code=message_code.Response.success,", "by applicable law or agreed to in writing, software # distributed under the", "message_code.Request.stop: self.__handler_stop, message_code.Request.tx_create: self.__handler_create_tx, message_code.Request.tx_connect_to_leader: self.__handler_connect_to_leader, message_code.Request.tx_connect_to_inner_peer: self.__handler_connect_to_inner_peer } self.__peer_id = None self.__stub_to_peer_service", "\"NotifyLeaderBroken\", # loopchain_pb2.CommonRequest(request=\"Fail Add Tx to Leader\") # ) return loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_leader(self,", "것 if self.__stub_to_leader is None: return loopchain_pb2.Message(code=message_code.Response.fail_connect_to_leader) else: try: self.__create_tx_continue() except Exception as", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "queue from enum import Enum from loopchain.baseservice import ObjectManager, StubManager from loopchain.container import", "logging.debug(\"TxService __handler_status %s : %s\", 
request.message, status_json) return loopchain_pb2.Message(code=message_code.Response.success, meta=status_json) def __handler_stop(self, request,", "PeerProcessStatus.leader_complained: self.__stored_tx.put(tx) logging.warning(\"Leader is complained your tx just stored in queue by temporally:", "# TODO leader complain 방식 변경중 임시로 현재 트리거는 중단한다. # stub_to_self_peer.call_in_time( #", "Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "context): logging.debug(f\"TxService handler connect to inner peer({request.message})\") inner_peer_target = request.message # 자신을 생성한", "작업이 있으면 전송한다. while not self.__stored_tx.empty(): stored_tx_item = self.__stored_tx.get() result_add_tx = self.__stub_to_leader.call_in_times( \"AddTx\",", "Enum from loopchain.baseservice import ObjectManager, StubManager from loopchain.container import Container from loopchain.protos import", "json.dumps(status) logging.debug(\"TxService __handler_status %s : %s\", request.message, status_json) return loopchain_pb2.Message(code=message_code.Response.success, meta=status_json) def __handler_stop(self,", "file except in compliance with the License. # You may obtain a copy", "e: logging.warning(f\"in tx service create_tx target({self.__stub_to_leader.target}) Exception: \" + str(e)) self.__stored_tx.put(tx) self.__peer_status =", "def __handler_connect_to_leader(self, request, context): logging.debug(f\"TxService handler connect to leader({request.message})\") leader_target = request.message self.__stub_to_leader", "프로세스와도 gRPC stub 을 이용하여 통신한다. 
self.__stub_to_inner_peer = StubManager.get_stub_manager_to_server( inner_peer_target, loopchain_pb2_grpc.InnerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True", "your tx just stored in queue by temporally: \" + str(self.__stored_tx.qsize())) else: self.__create_tx_continue()", "import Enum from loopchain.baseservice import ObjectManager, StubManager from loopchain.container import Container from loopchain.protos", "result_add_tx = self.__stub_to_leader.call( \"AddTx\", loopchain_pb2.TxSend(tx=tx), is_stub_reuse=True ) if result_add_tx.response_code != message_code.Response.success: raise Exception(result_add_tx.message)", "else: try: self.__create_tx_continue() except Exception as e: logging.warning(\"in tx service create tx continue()", "= PeerProcessStatus.leader_complained # TODO leader complain 방식 변경중 임시로 현재 트리거는 중단한다. #", "self.__peer_status == PeerProcessStatus.leader_complained: self.__stored_tx.put(tx) logging.warning(\"Leader is complained your tx just stored in queue", "License for the specific language governing permissions and # limitations under the License.", "def __create_tx_continue(self): # 저장된 작업이 있으면 전송한다. while not self.__stored_tx.empty(): stored_tx_item = self.__stored_tx.get()", "PeerProcessStatus.normal self.__stored_tx = queue.Queue() self.start() def __create_tx_continue(self): # 저장된 작업이 있으면 전송한다. 
while", "self.__peer_status = PeerProcessStatus.normal self.__stored_tx = queue.Queue() self.start() def __create_tx_continue(self): # 저장된 작업이 있으면", "to Leader\") # ) return loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_leader(self, request, context): logging.debug(f\"TxService handler connect", "to in writing, software # distributed under the License is distributed on an", "__handler_status %s : %s\", request.message, status_json) return loopchain_pb2.Message(code=message_code.Response.success, meta=status_json) def __handler_stop(self, request, context):", "under the License. \"\"\"Send tx to leader. Store tx temporary while leader is", "발생시킬 수 있다. # 안전한 연결을 위하여 부모 프로세스와도 gRPC stub 을 이용하여", "implied. # See the License for the specific language governing permissions and #", "as conf class PeerProcessStatus(Enum): normal = 1 leader_complained = 2 class TxService(Container, loopchain_pb2_grpc.ContainerServicer):", "\"License\"); # you may not use this file except in compliance with the", "is_allow_null_stub=True ) self.__peer_status = PeerProcessStatus.normal # TODO block generator 연결 실패 조건 확인할", "except Exception as e: logging.warning(f\"in tx service create_tx target({self.__stub_to_leader.target}) Exception: \" + str(e))", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "is None: return loopchain_pb2.Message(code=message_code.Response.fail_connect_to_leader) else: try: self.__create_tx_continue() except Exception as e: logging.warning(\"in tx", "쌍이 맞지 않은 경우 오류를 발생시킬 수 있다. 
# 안전한 연결을 위하여 부모", "tx temporary while leader is broken\"\"\" import logging import json import pickle import", "service create tx continue() Exception: \" + str(e)) self.__peer_status = PeerProcessStatus.leader_complained return loopchain_pb2.Message(code=message_code.Response.fail_add_tx_to_leader)", "stored in queue by temporally: \" + str(self.__stored_tx.qsize())) else: self.__create_tx_continue() result_add_tx = self.__stub_to_leader.call(", "not self.__stored_tx.empty(): stored_tx_item = self.__stored_tx.get() result_add_tx = self.__stub_to_leader.call_in_times( \"AddTx\", loopchain_pb2.TxSend(tx=stored_tx_item), is_stub_reuse=True) if result_add_tx", "Exception as e: logging.warning(f\"in tx service create_tx target({self.__stub_to_leader.target}) Exception: \" + str(e)) self.__stored_tx.put(tx)", "전송한다. while not self.__stored_tx.empty(): stored_tx_item = self.__stored_tx.get() result_add_tx = self.__stub_to_leader.call_in_times( \"AddTx\", loopchain_pb2.TxSend(tx=stored_tx_item), is_stub_reuse=True)", "and # limitations under the License. \"\"\"Send tx to leader. Store tx temporary", "broken\"\"\" import logging import json import pickle import queue from enum import Enum", "or implied. # See the License for the specific language governing permissions and", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "실패 조건 확인할 것 if self.__stub_to_leader is None: return loopchain_pb2.Message(code=message_code.Response.fail_connect_to_leader) else: try: self.__create_tx_continue()", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "# Copyright 2017 theloop, Inc. 
# # Licensed under the Apache License, Version", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "message_code from loopchain import configure as conf class PeerProcessStatus(Enum): normal = 1 leader_complained", "time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True ) logging.debug(\"try connect to inner peer: \" + str(inner_peer_target)) return loopchain_pb2.Message(code=message_code.Response.success)", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "import Container from loopchain.protos import loopchain_pb2, loopchain_pb2_grpc, message_code from loopchain import configure as", "= request.object tx_object = pickle.loads(tx) # logging.debug(f\"TxService got tx({tx_object.get_tx_hash()})\") try: if self.__peer_status ==", "str(e)) self.__stored_tx.put(tx) self.__peer_status = PeerProcessStatus.leader_complained # TODO leader complain 방식 변경중 임시로 현재", "try: self.__create_tx_continue() except Exception as e: logging.warning(\"in tx service create tx continue() Exception:", "loopchain_pb2.Message(code=message_code.Response.fail_connect_to_leader) else: try: self.__create_tx_continue() except Exception as e: logging.warning(\"in tx service create tx", "pickle import queue from enum import Enum from loopchain.baseservice import ObjectManager, StubManager from", "and result_add_tx.response_code != message_code.Response.success: self.__stored_tx.put(stored_tx_item) raise Exception(result_add_tx.message) def __handler_status(self, request, context): \"\"\"Service Status", "logging import json import pickle import queue from enum import Enum from loopchain.baseservice", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "from loopchain.baseservice import 
ObjectManager, StubManager from loopchain.container import Container from loopchain.protos import loopchain_pb2,", "pipe 를 통한 return 은 pipe send 와 쌍이 맞지 않은 경우 오류를", "위한 stub 을 만든다. # pipe 를 통한 return 은 pipe send 와", "self.__stub_to_leader.call( \"AddTx\", loopchain_pb2.TxSend(tx=tx), is_stub_reuse=True ) if result_add_tx.response_code != message_code.Response.success: raise Exception(result_add_tx.message) except Exception", "is_allow_null_stub=True ) logging.debug(\"try connect to inner peer: \" + str(inner_peer_target)) return loopchain_pb2.Message(code=message_code.Response.success) def", "is None and result_add_tx.response_code != message_code.Response.success: self.__stored_tx.put(stored_tx_item) raise Exception(result_add_tx.message) def __handler_status(self, request, context):", "time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True ) self.__peer_status = PeerProcessStatus.normal # TODO block generator 연결 실패 조건", "return loopchain_pb2.Message(code=message_code.Response.success, meta=status_json) def __handler_stop(self, request, context): logging.debug(\"TxService handler stop...\") self.stop() return loopchain_pb2.Message(code=message_code.Response.success)", "return 은 pipe send 와 쌍이 맞지 않은 경우 오류를 발생시킬 수 있다.", "logging.debug(\"try connect to inner peer: \" + str(inner_peer_target)) return loopchain_pb2.Message(code=message_code.Response.success) def Request(self, request,", "+ str(self.__stored_tx.qsize())) else: self.__create_tx_continue() result_add_tx = self.__stub_to_leader.call( \"AddTx\", loopchain_pb2.TxSend(tx=tx), is_stub_reuse=True ) if result_add_tx.response_code", "= None self.__peer_status = PeerProcessStatus.normal self.__stored_tx = queue.Queue() self.start() def __create_tx_continue(self): # 저장된", "def __handler_status(self, request, context): \"\"\"Service Status \"\"\" status = dict() status['status'] = message_code.Response.success", "target({self.__stub_to_leader.target}) Exception: \" + str(e)) self.__stored_tx.put(tx) 
self.__peer_status = PeerProcessStatus.leader_complained # TODO leader complain", "self.__stored_tx.put(tx) self.__peer_status = PeerProcessStatus.leader_complained # TODO leader complain 방식 변경중 임시로 현재 트리거는", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "# \"NotifyLeaderBroken\", # loopchain_pb2.CommonRequest(request=\"Fail Add Tx to Leader\") # ) return loopchain_pb2.Message(code=message_code.Response.success) def", "you may not use this file except in compliance with the License. #", "None self.__stub_to_peer_service = None # ObjectManager().tx_service = self self.__stub_to_leader = None self.__stub_to_inner_peer =", "= PeerProcessStatus.normal # TODO block generator 연결 실패 조건 확인할 것 if self.__stub_to_leader", "loopchain.protos import loopchain_pb2, loopchain_pb2_grpc, message_code from loopchain import configure as conf class PeerProcessStatus(Enum):", "theloop, Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "loopchain_pb2.Message(code=message_code.Response.success) def Request(self, request, context): # logging.debug(\"TxService got request: \" + str(request)) if", "loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_leader(self, request, context): logging.debug(f\"TxService handler connect to leader({request.message})\") leader_target = request.message", "수 있다. # 안전한 연결을 위하여 부모 프로세스와도 gRPC stub 을 이용하여 통신한다.", "loopchain_pb2.CommonRequest(request=\"Fail Add Tx to Leader\") # ) return loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_leader(self, request, context):", "request.message self.__stub_to_leader = StubManager.get_stub_manager_to_server( leader_target, loopchain_pb2_grpc.PeerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True ) self.__peer_status = PeerProcessStatus.normal #", "통신한다. 
self.__stub_to_inner_peer = StubManager.get_stub_manager_to_server( inner_peer_target, loopchain_pb2_grpc.InnerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True ) logging.debug(\"try connect to inner", "got request: \" + str(request)) if request.code in self.__handler_map.keys(): return self.__handler_map[request.code](request, context) return", "import queue from enum import Enum from loopchain.baseservice import ObjectManager, StubManager from loopchain.container", "use this file except in compliance with the License. # You may obtain", "Container from loopchain.protos import loopchain_pb2, loopchain_pb2_grpc, message_code from loopchain import configure as conf", "queue.Queue() self.start() def __create_tx_continue(self): # 저장된 작업이 있으면 전송한다. while not self.__stored_tx.empty(): stored_tx_item", "just stored in queue by temporally: \" + str(self.__stored_tx.qsize())) else: self.__create_tx_continue() result_add_tx =", "None and result_add_tx.response_code != message_code.Response.success: self.__stored_tx.put(stored_tx_item) raise Exception(result_add_tx.message) def __handler_status(self, request, context): \"\"\"Service", "부모 Peer 에 접속하기 위한 stub 을 만든다. 
# pipe 를 통한 return", "1 leader_complained = 2 class TxService(Container, loopchain_pb2_grpc.ContainerServicer): def __init__(self, port): Container.__init__(self, port) self.__handler_map", "+ str(e)) self.__peer_status = PeerProcessStatus.leader_complained return loopchain_pb2.Message(code=message_code.Response.fail_add_tx_to_leader) return loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_inner_peer(self, request, context):", "= self.__stub_to_leader.call( \"AddTx\", loopchain_pb2.TxSend(tx=tx), is_stub_reuse=True ) if result_add_tx.response_code != message_code.Response.success: raise Exception(result_add_tx.message) except", "else: self.__create_tx_continue() result_add_tx = self.__stub_to_leader.call( \"AddTx\", loopchain_pb2.TxSend(tx=tx), is_stub_reuse=True ) if result_add_tx.response_code != message_code.Response.success:", "= self.__stub_to_leader.call_in_times( \"AddTx\", loopchain_pb2.TxSend(tx=stored_tx_item), is_stub_reuse=True) if result_add_tx is None and result_add_tx.response_code != message_code.Response.success:", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "class PeerProcessStatus(Enum): normal = 1 leader_complained = 2 class TxService(Container, loopchain_pb2_grpc.ContainerServicer): def __init__(self,", "while leader is broken\"\"\" import logging import json import pickle import queue from", "e: logging.warning(\"in tx service create tx continue() Exception: \" + str(e)) self.__peer_status =", "from loopchain.protos import loopchain_pb2, loopchain_pb2_grpc, message_code from loopchain import configure as conf class", "\" + str(e)) self.__stored_tx.put(tx) self.__peer_status = PeerProcessStatus.leader_complained # TODO leader complain 방식 변경중", "self.stop() return loopchain_pb2.Message(code=message_code.Response.success) def __handler_create_tx(self, request, context): # logging.debug(\"TxService handler create tx\") tx", "logging.debug(f\"TxService handler connect to 
leader({request.message})\") leader_target = request.message self.__stub_to_leader = StubManager.get_stub_manager_to_server( leader_target, loopchain_pb2_grpc.PeerServiceStub,", "for the specific language governing permissions and # limitations under the License. \"\"\"Send", "2.0 (the \"License\"); # you may not use this file except in compliance", "normal = 1 leader_complained = 2 class TxService(Container, loopchain_pb2_grpc.ContainerServicer): def __init__(self, port): Container.__init__(self,", "leader complain 방식 변경중 임시로 현재 트리거는 중단한다. # stub_to_self_peer.call_in_time( # \"NotifyLeaderBroken\", #", "to inner peer: \" + str(inner_peer_target)) return loopchain_pb2.Message(code=message_code.Response.success) def Request(self, request, context): #", "message_code.Response.success: raise Exception(result_add_tx.message) except Exception as e: logging.warning(f\"in tx service create_tx target({self.__stub_to_leader.target}) Exception:", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "return loopchain_pb2.Message(code=message_code.Response.fail_connect_to_leader) else: try: self.__create_tx_continue() except Exception as e: logging.warning(\"in tx service create", "import loopchain_pb2, loopchain_pb2_grpc, message_code from loopchain import configure as conf class PeerProcessStatus(Enum): normal", "+ str(e)) self.__stored_tx.put(tx) self.__peer_status = PeerProcessStatus.leader_complained # TODO leader complain 방식 변경중 임시로", "dict() status['status'] = message_code.Response.success status_json = json.dumps(status) logging.debug(\"TxService __handler_status %s : %s\", request.message,", "handler connect to inner peer({request.message})\") inner_peer_target = request.message # 자신을 생성한 부모 Peer", "# # Unless required by applicable law or agreed to in writing, software", "express or implied. 
# See the License for the specific language governing permissions", "json import pickle import queue from enum import Enum from loopchain.baseservice import ObjectManager,", "enum import Enum from loopchain.baseservice import ObjectManager, StubManager from loopchain.container import Container from", "logging.debug(f\"TxService got tx({tx_object.get_tx_hash()})\") try: if self.__peer_status == PeerProcessStatus.leader_complained: self.__stored_tx.put(tx) logging.warning(\"Leader is complained your", "leader is broken\"\"\" import logging import json import pickle import queue from enum", "return loopchain_pb2.Message(code=message_code.Response.success) def __handler_create_tx(self, request, context): # logging.debug(\"TxService handler create tx\") tx =", "try: if self.__peer_status == PeerProcessStatus.leader_complained: self.__stored_tx.put(tx) logging.warning(\"Leader is complained your tx just stored", "생성한 부모 Peer 에 접속하기 위한 stub 을 만든다. # pipe 를 통한", "either express or implied. # See the License for the specific language governing", "stored_tx_item = self.__stored_tx.get() result_add_tx = self.__stub_to_leader.call_in_times( \"AddTx\", loopchain_pb2.TxSend(tx=stored_tx_item), is_stub_reuse=True) if result_add_tx is None", "request.object tx_object = pickle.loads(tx) # logging.debug(f\"TxService got tx({tx_object.get_tx_hash()})\") try: if self.__peer_status == PeerProcessStatus.leader_complained:", "import ObjectManager, StubManager from loopchain.container import Container from loopchain.protos import loopchain_pb2, loopchain_pb2_grpc, message_code", "while not self.__stored_tx.empty(): stored_tx_item = self.__stored_tx.get() result_add_tx = self.__stub_to_leader.call_in_times( \"AddTx\", loopchain_pb2.TxSend(tx=stored_tx_item), is_stub_reuse=True) if", "None # ObjectManager().tx_service = self self.__stub_to_leader = None self.__stub_to_inner_peer = None self.__peer_status =", "by temporally: \" + str(self.__stored_tx.qsize())) else: 
self.__create_tx_continue() result_add_tx = self.__stub_to_leader.call( \"AddTx\", loopchain_pb2.TxSend(tx=tx), is_stub_reuse=True", "result_add_tx.response_code != message_code.Response.success: self.__stored_tx.put(stored_tx_item) raise Exception(result_add_tx.message) def __handler_status(self, request, context): \"\"\"Service Status \"\"\"", "중단한다. # stub_to_self_peer.call_in_time( # \"NotifyLeaderBroken\", # loopchain_pb2.CommonRequest(request=\"Fail Add Tx to Leader\") # )", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "stub_to_self_peer.call_in_time( # \"NotifyLeaderBroken\", # loopchain_pb2.CommonRequest(request=\"Fail Add Tx to Leader\") # ) return loopchain_pb2.Message(code=message_code.Response.success)", "를 통한 return 은 pipe send 와 쌍이 맞지 않은 경우 오류를 발생시킬", "self self.__stub_to_leader = None self.__stub_to_inner_peer = None self.__peer_status = PeerProcessStatus.normal self.__stored_tx = queue.Queue()", "TxService(Container, loopchain_pb2_grpc.ContainerServicer): def __init__(self, port): Container.__init__(self, port) self.__handler_map = { message_code.Request.status: self.__handler_status, message_code.Request.stop:", "TODO leader complain 방식 변경중 임시로 현재 트리거는 중단한다. # stub_to_self_peer.call_in_time( # \"NotifyLeaderBroken\",", "self.__stub_to_peer_service = None # ObjectManager().tx_service = self self.__stub_to_leader = None self.__stub_to_inner_peer = None", "# 저장된 작업이 있으면 전송한다. 
while not self.__stored_tx.empty(): stored_tx_item = self.__stored_tx.get() result_add_tx =", "is broken\"\"\" import logging import json import pickle import queue from enum import", "loopchain_pb2.Message(code=message_code.Response.success) def __handler_create_tx(self, request, context): # logging.debug(\"TxService handler create tx\") tx = request.object", "connect to leader({request.message})\") leader_target = request.message self.__stub_to_leader = StubManager.get_stub_manager_to_server( leader_target, loopchain_pb2_grpc.PeerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True", "the License. # You may obtain a copy of the License at #", "loopchain_pb2.TxSend(tx=stored_tx_item), is_stub_reuse=True) if result_add_tx is None and result_add_tx.response_code != message_code.Response.success: self.__stored_tx.put(stored_tx_item) raise Exception(result_add_tx.message)", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "loopchain_pb2_grpc.ContainerServicer): def __init__(self, port): Container.__init__(self, port) self.__handler_map = { message_code.Request.status: self.__handler_status, message_code.Request.stop: self.__handler_stop,", "2 class TxService(Container, loopchain_pb2_grpc.ContainerServicer): def __init__(self, port): Container.__init__(self, port) self.__handler_map = { message_code.Request.status:", "<reponame>extendjh/loopchain # Copyright 2017 theloop, Inc. 
# # Licensed under the Apache License,", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "peer({request.message})\") inner_peer_target = request.message # 자신을 생성한 부모 Peer 에 접속하기 위한 stub", "logging.debug(f\"TxService handler connect to inner peer({request.message})\") inner_peer_target = request.message # 자신을 생성한 부모", "self.__handler_connect_to_leader, message_code.Request.tx_connect_to_inner_peer: self.__handler_connect_to_inner_peer } self.__peer_id = None self.__stub_to_peer_service = None # ObjectManager().tx_service =", "# TODO block generator 연결 실패 조건 확인할 것 if self.__stub_to_leader is None:", "연결 실패 조건 확인할 것 if self.__stub_to_leader is None: return loopchain_pb2.Message(code=message_code.Response.fail_connect_to_leader) else: try:", "port) self.__handler_map = { message_code.Request.status: self.__handler_status, message_code.Request.stop: self.__handler_stop, message_code.Request.tx_create: self.__handler_create_tx, message_code.Request.tx_connect_to_leader: self.__handler_connect_to_leader, message_code.Request.tx_connect_to_inner_peer:", "= request.message self.__stub_to_leader = StubManager.get_stub_manager_to_server( leader_target, loopchain_pb2_grpc.PeerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True ) self.__peer_status = PeerProcessStatus.normal", "permissions and # limitations under the License. \"\"\"Send tx to leader. Store tx", "= None # ObjectManager().tx_service = self self.__stub_to_leader = None self.__stub_to_inner_peer = None self.__peer_status", "= queue.Queue() self.start() def __create_tx_continue(self): # 저장된 작업이 있으면 전송한다. 
while not self.__stored_tx.empty():", "= self.__stored_tx.get() result_add_tx = self.__stub_to_leader.call_in_times( \"AddTx\", loopchain_pb2.TxSend(tx=stored_tx_item), is_stub_reuse=True) if result_add_tx is None and", "self.__peer_status = PeerProcessStatus.leader_complained # TODO leader complain 방식 변경중 임시로 현재 트리거는 중단한다.", "ObjectManager, StubManager from loopchain.container import Container from loopchain.protos import loopchain_pb2, loopchain_pb2_grpc, message_code from", "self.__stored_tx.get() result_add_tx = self.__stub_to_leader.call_in_times( \"AddTx\", loopchain_pb2.TxSend(tx=stored_tx_item), is_stub_reuse=True) if result_add_tx is None and result_add_tx.response_code", "self.__stub_to_inner_peer = StubManager.get_stub_manager_to_server( inner_peer_target, loopchain_pb2_grpc.InnerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True ) logging.debug(\"try connect to inner peer:", "임시로 현재 트리거는 중단한다. # stub_to_self_peer.call_in_time( # \"NotifyLeaderBroken\", # loopchain_pb2.CommonRequest(request=\"Fail Add Tx to", "the specific language governing permissions and # limitations under the License. \"\"\"Send tx", ") logging.debug(\"try connect to inner peer: \" + str(inner_peer_target)) return loopchain_pb2.Message(code=message_code.Response.success) def Request(self,", "import logging import json import pickle import queue from enum import Enum from", "with the License. 
# You may obtain a copy of the License at", "Exception(result_add_tx.message) def __handler_status(self, request, context): \"\"\"Service Status \"\"\" status = dict() status['status'] =", "leader_complained = 2 class TxService(Container, loopchain_pb2_grpc.ContainerServicer): def __init__(self, port): Container.__init__(self, port) self.__handler_map =", "Container.__init__(self, port) self.__handler_map = { message_code.Request.status: self.__handler_status, message_code.Request.stop: self.__handler_stop, message_code.Request.tx_create: self.__handler_create_tx, message_code.Request.tx_connect_to_leader: self.__handler_connect_to_leader,", "self.__handler_stop, message_code.Request.tx_create: self.__handler_create_tx, message_code.Request.tx_connect_to_leader: self.__handler_connect_to_leader, message_code.Request.tx_connect_to_inner_peer: self.__handler_connect_to_inner_peer } self.__peer_id = None self.__stub_to_peer_service =", "to leader. Store tx temporary while leader is broken\"\"\" import logging import json", "except Exception as e: logging.warning(\"in tx service create tx continue() Exception: \" +", "Store tx temporary while leader is broken\"\"\" import logging import json import pickle", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "language governing permissions and # limitations under the License. 
\"\"\"Send tx to leader.", "%s\", request.message, status_json) return loopchain_pb2.Message(code=message_code.Response.success, meta=status_json) def __handler_stop(self, request, context): logging.debug(\"TxService handler stop...\")", "통한 return 은 pipe send 와 쌍이 맞지 않은 경우 오류를 발생시킬 수", "\"AddTx\", loopchain_pb2.TxSend(tx=tx), is_stub_reuse=True ) if result_add_tx.response_code != message_code.Response.success: raise Exception(result_add_tx.message) except Exception as", "if result_add_tx is None and result_add_tx.response_code != message_code.Response.success: self.__stored_tx.put(stored_tx_item) raise Exception(result_add_tx.message) def __handler_status(self,", "tx to leader. Store tx temporary while leader is broken\"\"\" import logging import", "트리거는 중단한다. # stub_to_self_peer.call_in_time( # \"NotifyLeaderBroken\", # loopchain_pb2.CommonRequest(request=\"Fail Add Tx to Leader\") #", "send 와 쌍이 맞지 않은 경우 오류를 발생시킬 수 있다. # 안전한 연결을", "조건 확인할 것 if self.__stub_to_leader is None: return loopchain_pb2.Message(code=message_code.Response.fail_connect_to_leader) else: try: self.__create_tx_continue() except", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "from enum import Enum from loopchain.baseservice import ObjectManager, StubManager from loopchain.container import Container", "= StubManager.get_stub_manager_to_server( inner_peer_target, loopchain_pb2_grpc.InnerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True ) logging.debug(\"try connect to inner peer: \"", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "%s : %s\", request.message, status_json) return loopchain_pb2.Message(code=message_code.Response.success, meta=status_json) def __handler_stop(self, request, context): logging.debug(\"TxService", "generator 연결 실패 조건 확인할 것 if self.__stub_to_leader is None: return 
loopchain_pb2.Message(code=message_code.Response.fail_connect_to_leader) else:", "= None self.__stub_to_peer_service = None # ObjectManager().tx_service = self self.__stub_to_leader = None self.__stub_to_inner_peer", "# stub_to_self_peer.call_in_time( # \"NotifyLeaderBroken\", # loopchain_pb2.CommonRequest(request=\"Fail Add Tx to Leader\") # ) return", "= request.message # 자신을 생성한 부모 Peer 에 접속하기 위한 stub 을 만든다.", "import json import pickle import queue from enum import Enum from loopchain.baseservice import", "temporary while leader is broken\"\"\" import logging import json import pickle import queue", "from loopchain import configure as conf class PeerProcessStatus(Enum): normal = 1 leader_complained =", "= None self.__stub_to_inner_peer = None self.__peer_status = PeerProcessStatus.normal self.__stored_tx = queue.Queue() self.start() def", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "status_json = json.dumps(status) logging.debug(\"TxService __handler_status %s : %s\", request.message, status_json) return loopchain_pb2.Message(code=message_code.Response.success, meta=status_json)", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "specific language governing permissions and # limitations under the License. 
\"\"\"Send tx to", "\" + str(e)) self.__peer_status = PeerProcessStatus.leader_complained return loopchain_pb2.Message(code=message_code.Response.fail_add_tx_to_leader) return loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_inner_peer(self, request,", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "request, context): \"\"\"Service Status \"\"\" status = dict() status['status'] = message_code.Response.success status_json =", "connect to inner peer: \" + str(inner_peer_target)) return loopchain_pb2.Message(code=message_code.Response.success) def Request(self, request, context):", "request.message, status_json) return loopchain_pb2.Message(code=message_code.Response.success, meta=status_json) def __handler_stop(self, request, context): logging.debug(\"TxService handler stop...\") self.stop()", "부모 프로세스와도 gRPC stub 을 이용하여 통신한다. self.__stub_to_inner_peer = StubManager.get_stub_manager_to_server( inner_peer_target, loopchain_pb2_grpc.InnerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,", "conf class PeerProcessStatus(Enum): normal = 1 leader_complained = 2 class TxService(Container, loopchain_pb2_grpc.ContainerServicer): def", "않은 경우 오류를 발생시킬 수 있다. 
# 안전한 연결을 위하여 부모 프로세스와도 gRPC", "str(self.__stored_tx.qsize())) else: self.__create_tx_continue() result_add_tx = self.__stub_to_leader.call( \"AddTx\", loopchain_pb2.TxSend(tx=tx), is_stub_reuse=True ) if result_add_tx.response_code !=", "inner peer: \" + str(inner_peer_target)) return loopchain_pb2.Message(code=message_code.Response.success) def Request(self, request, context): # logging.debug(\"TxService", "See the License for the specific language governing permissions and # limitations under", "status_json) return loopchain_pb2.Message(code=message_code.Response.success, meta=status_json) def __handler_stop(self, request, context): logging.debug(\"TxService handler stop...\") self.stop() return", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "= message_code.Response.success status_json = json.dumps(status) logging.debug(\"TxService __handler_status %s : %s\", request.message, status_json) return", "request, context): logging.debug(\"TxService handler stop...\") self.stop() return loopchain_pb2.Message(code=message_code.Response.success) def __handler_create_tx(self, request, context): #", "접속하기 위한 stub 을 만든다. 
# pipe 를 통한 return 은 pipe send", "leader_target, loopchain_pb2_grpc.PeerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True ) self.__peer_status = PeerProcessStatus.normal # TODO block generator 연결", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "meta=status_json) def __handler_stop(self, request, context): logging.debug(\"TxService handler stop...\") self.stop() return loopchain_pb2.Message(code=message_code.Response.success) def __handler_create_tx(self,", "return loopchain_pb2.Message(code=message_code.Response.success) def Request(self, request, context): # logging.debug(\"TxService got request: \" + str(request))", "is_stub_reuse=True) if result_add_tx is None and result_add_tx.response_code != message_code.Response.success: self.__stored_tx.put(stored_tx_item) raise Exception(result_add_tx.message) def", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "inner_peer_target, loopchain_pb2_grpc.InnerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True ) logging.debug(\"try connect to inner peer: \" + str(inner_peer_target))", "__handler_connect_to_leader(self, request, context): logging.debug(f\"TxService handler connect to leader({request.message})\") leader_target = request.message self.__stub_to_leader =", "\"\"\"Service Status \"\"\" status = dict() status['status'] = message_code.Response.success status_json = json.dumps(status) logging.debug(\"TxService", "TODO block generator 연결 실패 조건 확인할 것 if self.__stub_to_leader is None: return", "__handler_stop(self, request, context): logging.debug(\"TxService handler stop...\") self.stop() return loopchain_pb2.Message(code=message_code.Response.success) def __handler_create_tx(self, request, context):", "방식 변경중 임시로 현재 트리거는 중단한다. 
# stub_to_self_peer.call_in_time( # \"NotifyLeaderBroken\", # loopchain_pb2.CommonRequest(request=\"Fail Add", "Version 2.0 (the \"License\"); # you may not use this file except in", "except in compliance with the License. # You may obtain a copy of", "context): # logging.debug(\"TxService got request: \" + str(request)) if request.code in self.__handler_map.keys(): return", "loopchain_pb2, loopchain_pb2_grpc, message_code from loopchain import configure as conf class PeerProcessStatus(Enum): normal =", "Exception(result_add_tx.message) except Exception as e: logging.warning(f\"in tx service create_tx target({self.__stub_to_leader.target}) Exception: \" +", "loopchain.container import Container from loopchain.protos import loopchain_pb2, loopchain_pb2_grpc, message_code from loopchain import configure", "message_code.Request.status: self.__handler_status, message_code.Request.stop: self.__handler_stop, message_code.Request.tx_create: self.__handler_create_tx, message_code.Request.tx_connect_to_leader: self.__handler_connect_to_leader, message_code.Request.tx_connect_to_inner_peer: self.__handler_connect_to_inner_peer } self.__peer_id =", "peer: \" + str(inner_peer_target)) return loopchain_pb2.Message(code=message_code.Response.success) def Request(self, request, context): # logging.debug(\"TxService got", "# logging.debug(\"TxService got request: \" + str(request)) if request.code in self.__handler_map.keys(): return self.__handler_map[request.code](request,", "현재 트리거는 중단한다. 
# stub_to_self_peer.call_in_time( # \"NotifyLeaderBroken\", # loopchain_pb2.CommonRequest(request=\"Fail Add Tx to Leader\")", "stop...\") self.stop() return loopchain_pb2.Message(code=message_code.Response.success) def __handler_create_tx(self, request, context): # logging.debug(\"TxService handler create tx\")", "tx service create_tx target({self.__stub_to_leader.target}) Exception: \" + str(e)) self.__stored_tx.put(tx) self.__peer_status = PeerProcessStatus.leader_complained #", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "context): logging.debug(\"TxService handler stop...\") self.stop() return loopchain_pb2.Message(code=message_code.Response.success) def __handler_create_tx(self, request, context): # logging.debug(\"TxService", "return loopchain_pb2.Message(code=message_code.Response.fail_add_tx_to_leader) return loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_inner_peer(self, request, context): logging.debug(f\"TxService handler connect to inner", "str(inner_peer_target)) return loopchain_pb2.Message(code=message_code.Response.success) def Request(self, request, context): # logging.debug(\"TxService got request: \" +", "self.__stored_tx.put(tx) logging.warning(\"Leader is complained your tx just stored in queue by temporally: \"", "= PeerProcessStatus.normal self.__stored_tx = queue.Queue() self.start() def __create_tx_continue(self): # 저장된 작업이 있으면 전송한다.", "result_add_tx is None and result_add_tx.response_code != message_code.Response.success: self.__stored_tx.put(stored_tx_item) raise Exception(result_add_tx.message) def __handler_status(self, request,", "자신을 생성한 부모 Peer 에 접속하기 위한 stub 을 만든다. # pipe 를", "as e: logging.warning(\"in tx service create tx continue() Exception: \" + str(e)) self.__peer_status", "경우 오류를 발생시킬 수 있다. 
# 안전한 연결을 위하여 부모 프로세스와도 gRPC stub", "to leader({request.message})\") leader_target = request.message self.__stub_to_leader = StubManager.get_stub_manager_to_server( leader_target, loopchain_pb2_grpc.PeerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True )", "None self.__peer_status = PeerProcessStatus.normal self.__stored_tx = queue.Queue() self.start() def __create_tx_continue(self): # 저장된 작업이", "__init__(self, port): Container.__init__(self, port) self.__handler_map = { message_code.Request.status: self.__handler_status, message_code.Request.stop: self.__handler_stop, message_code.Request.tx_create: self.__handler_create_tx,", "저장된 작업이 있으면 전송한다. while not self.__stored_tx.empty(): stored_tx_item = self.__stored_tx.get() result_add_tx = self.__stub_to_leader.call_in_times(", "gRPC stub 을 이용하여 통신한다. self.__stub_to_inner_peer = StubManager.get_stub_manager_to_server( inner_peer_target, loopchain_pb2_grpc.InnerServiceStub, time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT, is_allow_null_stub=True )", "class TxService(Container, loopchain_pb2_grpc.ContainerServicer): def __init__(self, port): Container.__init__(self, port) self.__handler_map = { message_code.Request.status: self.__handler_status,", "str(e)) self.__peer_status = PeerProcessStatus.leader_complained return loopchain_pb2.Message(code=message_code.Response.fail_add_tx_to_leader) return loopchain_pb2.Message(code=message_code.Response.success) def __handler_connect_to_inner_peer(self, request, context): logging.debug(f\"TxService", "pipe send 와 쌍이 맞지 않은 경우 오류를 발생시킬 수 있다. 
# 안전한", "as e: logging.warning(f\"in tx service create_tx target({self.__stub_to_leader.target}) Exception: \" + str(e)) self.__stored_tx.put(tx) self.__peer_status", "StubManager from loopchain.container import Container from loopchain.protos import loopchain_pb2, loopchain_pb2_grpc, message_code from loopchain", "context): # logging.debug(\"TxService handler create tx\") tx = request.object tx_object = pickle.loads(tx) #", "Exception as e: logging.warning(\"in tx service create tx continue() Exception: \" + str(e))", "tx\") tx = request.object tx_object = pickle.loads(tx) # logging.debug(f\"TxService got tx({tx_object.get_tx_hash()})\") try: if", "= 2 class TxService(Container, loopchain_pb2_grpc.ContainerServicer): def __init__(self, port): Container.__init__(self, port) self.__handler_map = {", ": %s\", request.message, status_json) return loopchain_pb2.Message(code=message_code.Response.success, meta=status_json) def __handler_stop(self, request, context): logging.debug(\"TxService handler", "변경중 임시로 현재 트리거는 중단한다. # stub_to_self_peer.call_in_time( # \"NotifyLeaderBroken\", # loopchain_pb2.CommonRequest(request=\"Fail Add Tx", "!= message_code.Response.success: self.__stored_tx.put(stored_tx_item) raise Exception(result_add_tx.message) def __handler_status(self, request, context): \"\"\"Service Status \"\"\" status", "None: return loopchain_pb2.Message(code=message_code.Response.fail_connect_to_leader) else: try: self.__create_tx_continue() except Exception as e: logging.warning(\"in tx service", "complained your tx just stored in queue by temporally: \" + str(self.__stored_tx.qsize())) else:", "stub 을 만든다. 
# pipe 를 통한 return 은 pipe send 와 쌍이", "self.__stub_to_leader is None: return loopchain_pb2.Message(code=message_code.Response.fail_connect_to_leader) else: try: self.__create_tx_continue() except Exception as e: logging.warning(\"in", "request, context): # logging.debug(\"TxService handler create tx\") tx = request.object tx_object = pickle.loads(tx)", "None self.__stub_to_inner_peer = None self.__peer_status = PeerProcessStatus.normal self.__stored_tx = queue.Queue() self.start() def __create_tx_continue(self):", "\" + str(inner_peer_target)) return loopchain_pb2.Message(code=message_code.Response.success) def Request(self, request, context): # logging.debug(\"TxService got request:", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "import configure as conf class PeerProcessStatus(Enum): normal = 1 leader_complained = 2 class", "message_code.Request.tx_connect_to_leader: self.__handler_connect_to_leader, message_code.Request.tx_connect_to_inner_peer: self.__handler_connect_to_inner_peer } self.__peer_id = None self.__stub_to_peer_service = None # ObjectManager().tx_service", "logging.debug(\"TxService handler create tx\") tx = request.object tx_object = pickle.loads(tx) # logging.debug(f\"TxService got", "service create_tx target({self.__stub_to_leader.target}) Exception: \" + str(e)) self.__stored_tx.put(tx) self.__peer_status = PeerProcessStatus.leader_complained # TODO", "if self.__stub_to_leader is None: return loopchain_pb2.Message(code=message_code.Response.fail_connect_to_leader) else: try: self.__create_tx_continue() except Exception as e:", "확인할 것 if self.__stub_to_leader is None: return loopchain_pb2.Message(code=message_code.Response.fail_connect_to_leader) else: try: self.__create_tx_continue() except Exception", "raise Exception(result_add_tx.message) except Exception as e: logging.warning(f\"in tx service create_tx target({self.__stub_to_leader.target}) Exception: \"", "있으면 전송한다. 
while not self.__stored_tx.empty(): stored_tx_item = self.__stored_tx.get() result_add_tx = self.__stub_to_leader.call_in_times( \"AddTx\", loopchain_pb2.TxSend(tx=stored_tx_item),", "temporally: \" + str(self.__stored_tx.qsize())) else: self.__create_tx_continue() result_add_tx = self.__stub_to_leader.call( \"AddTx\", loopchain_pb2.TxSend(tx=tx), is_stub_reuse=True )", "self.__handler_create_tx, message_code.Request.tx_connect_to_leader: self.__handler_connect_to_leader, message_code.Request.tx_connect_to_inner_peer: self.__handler_connect_to_inner_peer } self.__peer_id = None self.__stub_to_peer_service = None #" ]
[ "7.使用初始用户连gsql,清理环境。期望:删除用户成功 Expect : History : \"\"\" import sys import unittest from yat.test import", "drop table if exists table_set_role7_002 cascade; drop role if exists role7_001; drop role", "create role role7_002 password '{<PASSWORD>}'; grant all privileges to role7_002; alter role role7_001", "sql_cmd = self.commonsh.execut_db_sql(f''' drop table if exists table_set_role7_001 cascade; drop table if exists", "with login; alter role role7_002 with login; grant all privileges to role7_001; ''')", "logger.info(msg) self.assertIn(\"role7_001\", msg) logger.info('----------使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002---------') sql_cmd = (f''' SET SESSION AUTHORIZATION role7_002", "role7_002 with login; grant all privileges to role7_001; ''') logger.info(sql_cmd) self.assertIn(self.Constant.DROP_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.CREATE_ROLE_SUCCESS_MSG,", "self.assertIn(self.Constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.ALTER_ROLE_SUCCESS_MSG, sql_cmd) logger.info('-----------使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001-------------') sql_cmd = (''' SELECT SESSION_USER, CURRENT_USER; ''')", "logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\", msg) logger.info('----------使用role7_001连接gsql,执行SET SESSION", "self.DB_ENV_PATH = macro.DB_ENV_PATH self.Constant = Constant() self.commonsh = CommonSH('dbuser') def test_common_user_permission(self): logger.info('------------------------创建用户,给用户赋权为sysadmin,期望:创建赋权成功-----------------------------') sql_cmd", "create table table_set_role7_002(id int); end; SELECT SESSION_USER, CURRENT_USER; select tableowner from pg_tables where", "SOFTWARE IS PROVIDED ON AN \"AS IS\" BASIS, WITHOUT WARRANTIES OF ANY KIND,", "PSL v2. 
You may obtain a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2", "-U role7_001 -W {macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_001\",", "drop role if exists role7_001; drop role if exists role7_002; create role role7_001", "self.userNode = Node('dbuser') self.DB_ENV_PATH = macro.DB_ENV_PATH self.Constant = Constant() self.commonsh = CommonSH('dbuser') def", "{macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\", msg)", "\"drop table if exists table_set_role7_002 cascade;\" \"drop role if exists role7_001, role7_002;\" \"drop", "WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED", "role7_001 password '{<PASSWORD>}'; create role role7_002 password '{<PASSWORD>}'; grant all privileges to role7_002;", "= self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\", msg) logger.info('------------------------删除表 期望:删除成功-----------------------------') sql_cmd =", "'{<PASSWORD>}'; create table table_set_role7_001(id int); SET SESSION AUTHORIZATION DEFAULT; create table table_set_role7_002(id int);", "grant all privileges to role7_002; alter role role7_001 with login; alter role role7_002", "testcase.utils.Logger import Logger from testcase.utils.Constant import Constant from testcase.utils.CommonSH import CommonSH logger =", "-W {macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"role7_001\", msg) logger.info('----------使用role7_001连接gsql,执行set", "f''' source {self.DB_ENV_PATH}; gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U role7_001 -W {macro.COMMON_PASSWD} -c", "-W {macro.COMMON_PASSWD} 
-c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\",", "logger.info(msg) self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\", msg) logger.info('------------------------删除表 期望:删除成功-----------------------------') sql_cmd = self.commonsh.execut_db_sql(''' drop", "tablename ='table_set_role7_002'; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d {self.userNode.db_name} -p {self.userNode.db_port}", "\"\"\" Copyright (c) 2022 Huawei Technologies Co.,Ltd. openGauss is licensed under Mulan PSL", "cascade; ''') logger.info(sql_cmd) self.assertIn(self.Constant.TABLE_DROP_SUCCESS, sql_cmd) logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT---------') logger.info('----------期望:resset成功, table_set_role7_002属主为role7_001,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd =", "-c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\", msg) logger.info('-------------------使用role7_001连接gsql,查询table_set_role7_001属主为role7_002---------------')", ": 1.使用初始用户连gsql,创建用户,给用户赋登录权限,期望:创建赋权成功 2.使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001 3.使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002 4.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001 5.使用初始用户连gsql,删除表", "table_set_role7_002属主为role7_001,查 7.使用初始用户连gsql,清理环境。期望:删除用户成功 Expect : History : \"\"\" import sys import unittest from yat.test", "-W {macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_002\",", "alter role role7_001 with login; alter role role7_002 with login; grant all 
privileges", "(c) 2022 Huawei Technologies Co.,Ltd. openGauss is licensed under Mulan PSL v2. You", "PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN \"AS IS\" BASIS,", ": History : \"\"\" import sys import unittest from yat.test import macro from", "Constant from testcase.utils.CommonSH import CommonSH logger = Logger() class Privategrant(unittest.TestCase): def setUp(self): logger.info('------------------------Opengauss_Function_Set_Session_Authorization_Case0007开始执行-----------------------------')", "self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\", msg) logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd = (''' SET", "msg) logger.info('-------------------使用role7_001连接gsql,查询table_set_role7_001属主为role7_002---------------') sql_cmd = (''' select tableowner from pg_tables where tablename ='table_set_role7_001'; ''')", "import Constant from testcase.utils.CommonSH import CommonSH logger = Logger() class Privategrant(unittest.TestCase): def setUp(self):", "the Mulan PSL v2. You may obtain a copy of Mulan PSL v2", "alter role role7_002 with login; grant all privileges to role7_001; ''') logger.info(sql_cmd) self.assertIn(self.Constant.DROP_ROLE_SUCCESS_MSG,", "sys import unittest from yat.test import macro from yat.test import Node sys.path.append(sys.path[0]+\"/../\") from", "MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for", "See the Mulan PSL v2 for more details. 
\"\"\" \"\"\" Case Type :", "begin SET local session AUTHORIZATION role7_002 password '{<PASSWORD>}'; create table table_set_role7_001(id int); SET", "\"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\", msg)", "{macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_002\", msg)", "password '{<PASSWORD>}'; create table table_set_role7_001(id int); SET SESSION AUTHORIZATION DEFAULT; create table table_set_role7_002(id", "期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001 5.使用初始用户连gsql,删除表 期望:删除成功 6.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询table_set_role7_001属主为role7_002 table_set_role7_002属主为role7_001,查 7.使用初始用户连gsql,清理环境。期望:删除用户成功 Expect : History", "with login; grant all privileges to role7_001; ''') logger.info(sql_cmd) self.assertIn(self.Constant.DROP_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd)", "msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\", msg) logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION", "self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\", msg) def tearDown(self): logger.info('---------------------------------清理环境。期望:删除用户成功-----------------------------------') sql_cmd = self.commonsh.execut_db_sql(\"drop table \" \"if", "PSL v2 for more details. \"\"\" \"\"\" Case Type : 功能测试 Case Name", "Technologies Co.,Ltd. openGauss is licensed under Mulan PSL v2. 
You can use this", "{macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_001\", msg)", "(''' SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d {self.userNode.db_name}", "-W {macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\",", "role role7_002 with login; grant all privileges to role7_001; ''') logger.info(sql_cmd) self.assertIn(self.Constant.DROP_ROLE_SUCCESS_MSG, sql_cmd)", "create table table_set_role7_001(id int); SET SESSION AUTHORIZATION DEFAULT; create table table_set_role7_002(id int); end;", "Node sys.path.append(sys.path[0]+\"/../\") from testcase.utils.Logger import Logger from testcase.utils.Constant import Constant from testcase.utils.CommonSH import", "under Mulan PSL v2. You can use this software according to the terms", "BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT", "terms and conditions of the Mulan PSL v2. You may obtain a copy", "role7_001 -W {macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_001\", msg)", "macro from yat.test import Node sys.path.append(sys.path[0]+\"/../\") from testcase.utils.Logger import Logger from testcase.utils.Constant import", "pg_tables where tablename ='table_set_role7_001'; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d {self.userNode.db_name}", "You can use this software according to the terms and conditions of the", "int); end; SELECT SESSION_USER, CURRENT_USER; select tableowner from pg_tables where tablename ='table_set_role7_002'; ''')", "PSL v2. 
You can use this software according to the terms and conditions", "\" \"if exists table_set_role7_001 cascade;\" \"drop table if exists table_set_role7_002 cascade;\" \"drop role", "to role7_001; ''') logger.info(sql_cmd) self.assertIn(self.Constant.DROP_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.ALTER_ROLE_SUCCESS_MSG, sql_cmd) logger.info('-----------使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001-------------') sql_cmd", "logger.info(msg) self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\", msg) logger.info('-------------------使用role7_001连接gsql,查询table_set_role7_001属主为role7_002---------------') sql_cmd = (''' select tableowner from pg_tables", "role7_002 password '{<PASSWORD>}'; grant all privileges to role7_002; alter role role7_001 with login;", "table if exists table_set_role7_001 cascade; drop table if exists table_set_role7_002 cascade; ''') logger.info(sql_cmd)", "msg) def tearDown(self): logger.info('---------------------------------清理环境。期望:删除用户成功-----------------------------------') sql_cmd = self.commonsh.execut_db_sql(\"drop table \" \"if exists table_set_role7_001 cascade;\"", "''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\", msg) logger.info('-------------------使用role7_001连接gsql,查询table_set_role7_001属主为role7_002---------------') sql_cmd =", "self.assertIn(self.Constant.DROP_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.ALTER_ROLE_SUCCESS_MSG, sql_cmd) logger.info('-----------使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001-------------') sql_cmd = (''' SELECT SESSION_USER,", "from yat.test import Node sys.path.append(sys.path[0]+\"/../\") from testcase.utils.Logger import Logger from testcase.utils.Constant import Constant", "from pg_tables 
where tablename ='table_set_role7_002'; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d", "v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN \"AS IS\" BASIS, WITHOUT", "select tableowner from pg_tables where tablename ='table_set_role7_002'; ''') excute_cmd = f''' source {self.DB_ENV_PATH};", "msg) self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\", msg) logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd = ('''", "''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\", msg) logger.info('----------使用role7_001连接gsql,执行SET", "role if exists role7_001; drop role if exists role7_002; create role role7_001 password", "exists role7_002; create role role7_001 password '{<PASSWORD>}'; create role role7_002 password '{<PASSWORD>}'; grant", "AN \"AS IS\" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,", "self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"role7_001\", msg) logger.info('----------使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002---------') sql_cmd = (f''' SET SESSION AUTHORIZATION", "drop table if exists table_set_role7_001 cascade; drop table if exists table_set_role7_002 cascade; drop", "logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT---------') logger.info('----------期望:resset成功, table_set_role7_002属主为role7_001,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd = (f''' begin SET local session", "logger.info('------------------------删除表 期望:删除成功-----------------------------') sql_cmd = self.commonsh.execut_db_sql(''' drop table if exists table_set_role7_001 cascade; drop table", "may obtain a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS", 
"CommonSH('dbuser') def test_common_user_permission(self): logger.info('------------------------创建用户,给用户赋权为sysadmin,期望:创建赋权成功-----------------------------') sql_cmd = self.commonsh.execut_db_sql(f''' drop table if exists table_set_role7_001 cascade;", "FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details.", "SESSION AUTHORIZATION DEFAULT; create table table_set_role7_002(id int); end; SELECT SESSION_USER, CURRENT_USER; select tableowner", "CURRENT_USER; select tableowner from pg_tables where tablename ='table_set_role7_002'; ''') excute_cmd = f''' source", "Case Name : set AUTHORIZATION DEFAULT Description : 1.使用初始用户连gsql,创建用户,给用户赋登录权限,期望:创建赋权成功 2.使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001 3.使用role7_001连接gsql,执行set role语句设置为group用户role7_002", "role7_001; drop role if exists role7_002; create role role7_001 password '{<PASSWORD>}'; create role", "msg) self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\", msg) logger.info('------------------------删除表 期望:删除成功-----------------------------') sql_cmd = self.commonsh.execut_db_sql(''' drop table if", "Huawei Technologies Co.,Ltd. openGauss is licensed under Mulan PSL v2. 
You can use", "if exists role7_001; drop role if exists role7_002; create role role7_001 password '{<PASSWORD>}';", "macro.DB_ENV_PATH self.Constant = Constant() self.commonsh = CommonSH('dbuser') def test_common_user_permission(self): logger.info('------------------------创建用户,给用户赋权为sysadmin,期望:创建赋权成功-----------------------------') sql_cmd = self.commonsh.execut_db_sql(f'''", "from pg_tables where tablename ='table_set_role7_001'; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d", "drop table if exists table_set_role7_002 cascade; ''') logger.info(sql_cmd) self.assertIn(self.Constant.TABLE_DROP_SUCCESS, sql_cmd) logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION", "drop table if exists table_set_role7_001 cascade; drop table if exists table_set_role7_002 cascade; ''')", "Logger from testcase.utils.Constant import Constant from testcase.utils.CommonSH import CommonSH logger = Logger() class", "msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\", msg) logger.info('------------------------删除表 期望:删除成功-----------------------------') sql_cmd", "= self.commonsh.execut_db_sql(''' drop table if exists table_set_role7_001 cascade; drop table if exists table_set_role7_002", "import unittest from yat.test import macro from yat.test import Node sys.path.append(sys.path[0]+\"/../\") from testcase.utils.Logger", "role7_002 password '{macro.COMMON_PASSWD}'; SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql", "self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\", msg) logger.info('------------------------删除表 期望:删除成功-----------------------------') sql_cmd = self.commonsh.execut_db_sql(''' drop table", "role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002 4.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001 
5.使用初始用户连gsql,删除表 期望:删除成功 6.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT", "-p {self.userNode.db_port} -U role7_001 -W {macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result()", "-W {macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_001\",", "openGauss is licensed under Mulan PSL v2. You can use this software according", "logger.info('-------------------使用role7_001连接gsql,查询table_set_role7_001属主为role7_002---------------') sql_cmd = (''' select tableowner from pg_tables where tablename ='table_set_role7_001'; ''') excute_cmd", "-U role7_001 -W {macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"role7_001\",", "software according to the terms and conditions of the Mulan PSL v2. You", "= Logger() class Privategrant(unittest.TestCase): def setUp(self): logger.info('------------------------Opengauss_Function_Set_Session_Authorization_Case0007开始执行-----------------------------') self.userNode = Node('dbuser') self.DB_ENV_PATH = macro.DB_ENV_PATH", "import macro from yat.test import Node sys.path.append(sys.path[0]+\"/../\") from testcase.utils.Logger import Logger from testcase.utils.Constant", "期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd = (''' SET SESSION AUTHORIZATION DEFAULT; SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd", "table_set_role7_001(id int); SET SESSION AUTHORIZATION DEFAULT; create table table_set_role7_002(id int); end; SELECT SESSION_USER,", "obtain a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED", "Name : set AUTHORIZATION DEFAULT Description : 1.使用初始用户连gsql,创建用户,给用户赋登录权限,期望:创建赋权成功 2.使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001 3.使用role7_001连接gsql,执行set 
role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002", "AUTHORIZATION DEFAULT---------') logger.info('----------期望:resset成功, table_set_role7_002属主为role7_001,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd = (f''' begin SET local session AUTHORIZATION role7_002", "def test_common_user_permission(self): logger.info('------------------------创建用户,给用户赋权为sysadmin,期望:创建赋权成功-----------------------------') sql_cmd = self.commonsh.execut_db_sql(f''' drop table if exists table_set_role7_001 cascade; drop", "WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO", "table table_set_role7_001(id int); SET SESSION AUTHORIZATION DEFAULT; create table table_set_role7_002(id int); end; SELECT", "table if exists table_set_role7_002 cascade; ''') logger.info(sql_cmd) self.assertIn(self.Constant.TABLE_DROP_SUCCESS, sql_cmd) logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT---------')", "table_set_role7_002属主为role7_001,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd = (f''' begin SET local session AUTHORIZATION role7_002 password '{<PASSWORD>}'; create", "logger.info(sql_cmd) self.assertIn(self.Constant.DROP_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.ALTER_ROLE_SUCCESS_MSG, sql_cmd) logger.info('-----------使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001-------------') sql_cmd = (''' SELECT", "exists table_set_role7_001 cascade; drop table if exists table_set_role7_002 cascade; drop role if exists", "='table_set_role7_001'; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U", "password '{<PASSWORD>}'; grant all privileges to role7_002; alter role role7_001 with login; alter", "SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询table_set_role7_001属主为role7_002 table_set_role7_002属主为role7_001,查 7.使用初始用户连gsql,清理环境。期望:删除用户成功 Expect : History : \"\"\" 
import sys", "sql_cmd) self.assertIn(self.Constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.ALTER_ROLE_SUCCESS_MSG, sql_cmd) logger.info('-----------使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001-------------') sql_cmd = (''' SELECT SESSION_USER, CURRENT_USER;", "self.assertIn(\"role7_001\", msg) logger.info('----------使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002---------') sql_cmd = (f''' SET SESSION AUTHORIZATION role7_002 password", "A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. \"\"\" \"\"\"", "{self.userNode.db_name} -p {self.userNode.db_port} -U role7_001 -W {macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg =", "= Node('dbuser') self.DB_ENV_PATH = macro.DB_ENV_PATH self.Constant = Constant() self.commonsh = CommonSH('dbuser') def test_common_user_permission(self):", "sql_cmd) logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT---------') logger.info('----------期望:resset成功, table_set_role7_002属主为role7_001,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd = (f''' begin SET local", "copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN", "logger = Logger() class Privategrant(unittest.TestCase): def setUp(self): logger.info('------------------------Opengauss_Function_Set_Session_Authorization_Case0007开始执行-----------------------------') self.userNode = Node('dbuser') self.DB_ENV_PATH =", "self.assertIn(\"role7_002\", msg) def tearDown(self): logger.info('---------------------------------清理环境。期望:删除用户成功-----------------------------------') sql_cmd = self.commonsh.execut_db_sql(\"drop table \" \"if exists table_set_role7_001", "self.assertIn(\"role7_001\", msg) logger.info('-------------------使用role7_001连接gsql,查询table_set_role7_001属主为role7_002---------------') sql_cmd = (''' select tableowner from pg_tables where tablename ='table_set_role7_001';", "role7_001 with 
login; alter role role7_002 with login; grant all privileges to role7_001;", "from testcase.utils.Constant import Constant from testcase.utils.CommonSH import CommonSH logger = Logger() class Privategrant(unittest.TestCase):", "logger.info('-----------使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001-------------') sql_cmd = (''' SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd = f''' source", "FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. \"\"\"", "-c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"role7_001\", msg) logger.info('----------使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002---------')", "\"\"\" import sys import unittest from yat.test import macro from yat.test import Node", "= f''' source {self.DB_ENV_PATH}; gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U role7_001 -W {macro.COMMON_PASSWD}", "logger.info('----------使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002---------') sql_cmd = (f''' SET SESSION AUTHORIZATION role7_002 password '{macro.COMMON_PASSWD}'; SELECT", "IS PROVIDED ON AN \"AS IS\" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER", "logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\", msg) logger.info('------------------------删除表 期望:删除成功-----------------------------')", "local session AUTHORIZATION role7_002 password '{<PASSWORD>}'; create table table_set_role7_001(id int); SET SESSION AUTHORIZATION", "CURRENT_USER; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U", "table \" \"if exists table_set_role7_001 cascade;\" \"drop table if exists table_set_role7_002 cascade;\" \"drop", "self.assertNotIn(\"role7_002\", msg) 
self.assertIn(\"role7_001\", msg) logger.info('-------------------使用role7_001连接gsql,查询table_set_role7_001属主为role7_002---------------') sql_cmd = (''' select tableowner from pg_tables where", "exists table_set_role7_002 cascade; drop role if exists role7_001; drop role if exists role7_002;", "History : \"\"\" import sys import unittest from yat.test import macro from yat.test", "Logger() class Privategrant(unittest.TestCase): def setUp(self): logger.info('------------------------Opengauss_Function_Set_Session_Authorization_Case0007开始执行-----------------------------') self.userNode = Node('dbuser') self.DB_ENV_PATH = macro.DB_ENV_PATH self.Constant", "role role7_001 password '{<PASSWORD>}'; create role role7_002 password '{<PASSWORD>}'; grant all privileges to", "licensed under Mulan PSL v2. You can use this software according to the", "''') logger.info(sql_cmd) self.assertIn(self.Constant.TABLE_DROP_SUCCESS, sql_cmd) logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT---------') logger.info('----------期望:resset成功, table_set_role7_002属主为role7_001,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd = (f'''", "AUTHORIZATION DEFAULT; SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d", "SESSION AUTHORIZATION DEFAULT---------') logger.info('----------期望:resset成功, table_set_role7_002属主为role7_001,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd = (f''' begin SET local session AUTHORIZATION", "if exists table_set_role7_002 cascade;\" \"drop role if exists role7_001, role7_002;\" \"drop group if", "IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR", "\"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\", msg)", "sql_cmd = (f''' begin SET local session AUTHORIZATION role7_002 password '{<PASSWORD>}'; create 
table", "role7_002 password '{<PASSWORD>}'; create table table_set_role7_001(id int); SET SESSION AUTHORIZATION DEFAULT; create table", "from testcase.utils.Logger import Logger from testcase.utils.Constant import Constant from testcase.utils.CommonSH import CommonSH logger", "AUTHORIZATION DEFAULT; create table table_set_role7_002(id int); end; SELECT SESSION_USER, CURRENT_USER; select tableowner from", "Constant() self.commonsh = CommonSH('dbuser') def test_common_user_permission(self): logger.info('------------------------创建用户,给用户赋权为sysadmin,期望:创建赋权成功-----------------------------') sql_cmd = self.commonsh.execut_db_sql(f''' drop table if", "='table_set_role7_002'; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U", "SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001 5.使用初始用户连gsql,删除表 期望:删除成功 6.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询table_set_role7_001属主为role7_002 table_set_role7_002属主为role7_001,查 7.使用初始用户连gsql,清理环境。期望:删除用户成功", "exists role7_001; drop role if exists role7_002; create role role7_001 password '{<PASSWORD>}'; create", "SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd = (''' SET SESSION AUTHORIZATION DEFAULT; SELECT SESSION_USER,", "table_set_role7_002(id int); end; SELECT SESSION_USER, CURRENT_USER; select tableowner from pg_tables where tablename ='table_set_role7_002';", "LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 
See the Mulan", "login; grant all privileges to role7_001; ''') logger.info(sql_cmd) self.assertIn(self.Constant.DROP_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.ALTER_ROLE_SUCCESS_MSG,", "''') logger.info(sql_cmd) self.assertIn(self.Constant.DROP_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.ALTER_ROLE_SUCCESS_MSG, sql_cmd) logger.info('-----------使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001-------------') sql_cmd = ('''", "功能测试 Case Name : set AUTHORIZATION DEFAULT Description : 1.使用初始用户连gsql,创建用户,给用户赋登录权限,期望:创建赋权成功 2.使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001 3.使用role7_001连接gsql,执行set", "cascade;\" \"drop table if exists table_set_role7_002 cascade;\" \"drop role if exists role7_001, role7_002;\"", "grant all privileges to role7_001; ''') logger.info(sql_cmd) self.assertIn(self.Constant.DROP_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.ALTER_ROLE_SUCCESS_MSG, sql_cmd)", "self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\", msg) logger.info('------------------------删除表 期望:删除成功-----------------------------') sql_cmd = self.commonsh.execut_db_sql('''", "AUTHORIZATION role7_002 password '{<PASSWORD>}'; create table table_set_role7_001(id int); SET SESSION AUTHORIZATION DEFAULT; create", "期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002 4.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001 5.使用初始用户连gsql,删除表 期望:删除成功 6.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询table_set_role7_001属主为role7_002", "NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 
See the", "AUTHORIZATION DEFAULT Description : 1.使用初始用户连gsql,创建用户,给用户赋登录权限,期望:创建赋权成功 2.使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001 3.使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002 4.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION", "BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See", "password '{<PASSWORD>}'; create role role7_002 password '{<PASSWORD>}'; grant all privileges to role7_002; alter", "to role7_002; alter role role7_001 with login; alter role role7_002 with login; grant", "msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\", msg) def tearDown(self): logger.info('---------------------------------清理环境。期望:删除用户成功-----------------------------------') sql_cmd =", "PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. \"\"\" \"\"\" Case", "excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U role7_001 -W", "2.使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001 3.使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002 4.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001 5.使用初始用户连gsql,删除表 期望:删除成功 6.使用role7_001连接gsql,执行SET", "-U role7_001 -W {macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_002\",", "= self.commonsh.execut_db_sql(f''' drop table if exists table_set_role7_001 cascade; drop table if exists table_set_role7_002", "{macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"role7_001\", msg) logger.info('----------使用role7_001连接gsql,执行set role语句设置为group用户role7_002", "where tablename ='table_set_role7_001'; ''') 
excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d {self.userNode.db_name} -p", "= CommonSH('dbuser') def test_common_user_permission(self): logger.info('------------------------创建用户,给用户赋权为sysadmin,期望:创建赋权成功-----------------------------') sql_cmd = self.commonsh.execut_db_sql(f''' drop table if exists table_set_role7_001", "AUTHORIZATION DEFAULT 期望:resset成功,查询table_set_role7_001属主为role7_002 table_set_role7_002属主为role7_001,查 7.使用初始用户连gsql,清理环境。期望:删除用户成功 Expect : History : \"\"\" import sys import", "table_set_role7_002 cascade; drop role if exists role7_001; drop role if exists role7_002; create", "= (f''' SET SESSION AUTHORIZATION role7_002 password '{macro.COMMON_PASSWD}'; SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd", "msg) self.assertIn(\"role7_001\", msg) logger.info('------------------------删除表 期望:删除成功-----------------------------') sql_cmd = self.commonsh.execut_db_sql(''' drop table if exists table_set_role7_001", "class Privategrant(unittest.TestCase): def setUp(self): logger.info('------------------------Opengauss_Function_Set_Session_Authorization_Case0007开始执行-----------------------------') self.userNode = Node('dbuser') self.DB_ENV_PATH = macro.DB_ENV_PATH self.Constant =", "sql_cmd = self.commonsh.execut_db_sql(''' drop table if exists table_set_role7_001 cascade; drop table if exists", "sql_cmd = (''' SET SESSION AUTHORIZATION DEFAULT; SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd =", "Case Type : 功能测试 Case Name : set AUTHORIZATION DEFAULT Description : 1.使用初始用户连gsql,创建用户,给用户赋登录权限,期望:创建赋权成功", "期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002---------') sql_cmd = (f''' SET SESSION AUTHORIZATION role7_002 password '{macro.COMMON_PASSWD}'; SELECT SESSION_USER, CURRENT_USER;", "(''' select tableowner from pg_tables where tablename ='table_set_role7_001'; ''') excute_cmd = f''' source", "the Mulan PSL v2 for more details. \"\"\" \"\"\" Case Type : 功能测试", "OR FIT FOR A PARTICULAR PURPOSE. 
See the Mulan PSL v2 for more", "SESSION AUTHORIZATION DEFAULT; SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql", "'{<PASSWORD>}'; grant all privileges to role7_002; alter role role7_001 with login; alter role", "DEFAULT; SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d {self.userNode.db_name}", "= (''' SET SESSION AUTHORIZATION DEFAULT; SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd = f'''", "yat.test import macro from yat.test import Node sys.path.append(sys.path[0]+\"/../\") from testcase.utils.Logger import Logger from", "SET SESSION AUTHORIZATION DEFAULT; SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd = f''' source {self.DB_ENV_PATH};", "password '{macro.COMMON_PASSWD}'; SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d", "table if exists table_set_role7_002 cascade;\" \"drop role if exists role7_001, role7_002;\" \"drop group", "table_set_role7_002 cascade; ''') logger.info(sql_cmd) self.assertIn(self.Constant.TABLE_DROP_SUCCESS, sql_cmd) logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT---------') logger.info('----------期望:resset成功, table_set_role7_002属主为role7_001,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd", "msg) logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd = (''' SET SESSION AUTHORIZATION DEFAULT;", "create role role7_001 password '{<PASSWORD>}'; create role role7_002 password '{<PASSWORD>}'; grant all privileges", "'{macro.COMMON_PASSWD}'; SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d {self.userNode.db_name}", "期望:resset成功,查询table_set_role7_001属主为role7_002 table_set_role7_002属主为role7_001,查 7.使用初始用户连gsql,清理环境。期望:删除用户成功 Expect : History : \"\"\" import sys import unittest from", "\"if exists table_set_role7_001 cascade;\" \"drop table if 
exists table_set_role7_002 cascade;\" \"drop role if", "= self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"role7_001\", msg) logger.info('----------使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002---------') sql_cmd = (f''' SET SESSION", "self.commonsh.execut_db_sql(\"drop table \" \"if exists table_set_role7_001 cascade;\" \"drop table if exists table_set_role7_002 cascade;\"", "test_common_user_permission(self): logger.info('------------------------创建用户,给用户赋权为sysadmin,期望:创建赋权成功-----------------------------') sql_cmd = self.commonsh.execut_db_sql(f''' drop table if exists table_set_role7_001 cascade; drop table", "= self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\", msg) logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT", "import Node sys.path.append(sys.path[0]+\"/../\") from testcase.utils.Logger import Logger from testcase.utils.Constant import Constant from testcase.utils.CommonSH", "Type : 功能测试 Case Name : set AUTHORIZATION DEFAULT Description : 1.使用初始用户连gsql,创建用户,给用户赋登录权限,期望:创建赋权成功 2.使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER,", "\"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"role7_001\", msg) logger.info('----------使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002---------') sql_cmd", "role role7_001 with login; alter role role7_002 with login; grant all privileges to", "role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002---------') sql_cmd = (f''' SET SESSION AUTHORIZATION role7_002 password '{macro.COMMON_PASSWD}'; SELECT SESSION_USER,", "drop role if exists role7_002; create role role7_001 password '{<PASSWORD>}'; create role role7_002", "if exists table_set_role7_002 cascade; drop role if exists role7_001; drop role if 
exists", "if exists role7_002; create role role7_001 password '{<PASSWORD>}'; create role role7_002 password '{<PASSWORD>}';", "import Logger from testcase.utils.Constant import Constant from testcase.utils.CommonSH import CommonSH logger = Logger()", "OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A", "1.使用初始用户连gsql,创建用户,给用户赋登录权限,期望:创建赋权成功 2.使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001 3.使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002 4.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001 5.使用初始用户连gsql,删除表 期望:删除成功", "all privileges to role7_001; ''') logger.info(sql_cmd) self.assertIn(self.Constant.DROP_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.ALTER_ROLE_SUCCESS_MSG, sql_cmd) logger.info('-----------使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER,", "role7_001 -W {macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"role7_001\", msg)", "THIS SOFTWARE IS PROVIDED ON AN \"AS IS\" BASIS, WITHOUT WARRANTIES OF ANY", ": 功能测试 Case Name : set AUTHORIZATION DEFAULT Description : 1.使用初始用户连gsql,创建用户,给用户赋登录权限,期望:创建赋权成功 2.使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001", "a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON", "TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 
See the Mulan PSL", "tableowner from pg_tables where tablename ='table_set_role7_001'; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql", "self.Constant = Constant() self.commonsh = CommonSH('dbuser') def test_common_user_permission(self): logger.info('------------------------创建用户,给用户赋权为sysadmin,期望:创建赋权成功-----------------------------') sql_cmd = self.commonsh.execut_db_sql(f''' drop", "at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN \"AS IS\" BASIS, WITHOUT WARRANTIES", "KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR", "v2 for more details. \"\"\" \"\"\" Case Type : 功能测试 Case Name :", "from testcase.utils.CommonSH import CommonSH logger = Logger() class Privategrant(unittest.TestCase): def setUp(self): logger.info('------------------------Opengauss_Function_Set_Session_Authorization_Case0007开始执行-----------------------------') self.userNode", ": \"\"\" import sys import unittest from yat.test import macro from yat.test import", "self.assertIn(\"role7_002\", msg) logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd = (''' SET SESSION AUTHORIZATION", "according to the terms and conditions of the Mulan PSL v2. 
You may", "'{<PASSWORD>}'; create role role7_002 password '{<PASSWORD>}'; grant all privileges to role7_002; alter role", "from yat.test import macro from yat.test import Node sys.path.append(sys.path[0]+\"/../\") from testcase.utils.Logger import Logger", "if exists table_set_role7_001 cascade; drop table if exists table_set_role7_002 cascade; ''') logger.info(sql_cmd) self.assertIn(self.Constant.TABLE_DROP_SUCCESS,", "table_set_role7_002 cascade;\" \"drop role if exists role7_001, role7_002;\" \"drop group if exists group7;\")", ": set AUTHORIZATION DEFAULT Description : 1.使用初始用户连gsql,创建用户,给用户赋登录权限,期望:创建赋权成功 2.使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001 3.使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002 4.使用role7_001连接gsql,执行SET", "Node('dbuser') self.DB_ENV_PATH = macro.DB_ENV_PATH self.Constant = Constant() self.commonsh = CommonSH('dbuser') def test_common_user_permission(self): logger.info('------------------------创建用户,给用户赋权为sysadmin,期望:创建赋权成功-----------------------------')", "tearDown(self): logger.info('---------------------------------清理环境。期望:删除用户成功-----------------------------------') sql_cmd = self.commonsh.execut_db_sql(\"drop table \" \"if exists table_set_role7_001 cascade;\" \"drop table", "table if exists table_set_role7_001 cascade; drop table if exists table_set_role7_002 cascade; drop role", "exists table_set_role7_001 cascade;\" \"drop table if exists table_set_role7_002 cascade;\" \"drop role if exists", "SET local session AUTHORIZATION role7_002 password '{<PASSWORD>}'; create table table_set_role7_001(id int); SET SESSION", "SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d {self.userNode.db_name} -p", "logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"role7_001\", msg) logger.info('----------使用role7_001连接gsql,执行set role语句设置为group用户role7_002 
期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002---------') sql_cmd = (f'''", "sql_cmd = (''' SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql", "Mulan PSL v2. You may obtain a copy of Mulan PSL v2 at:", "IS\" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT", "exists table_set_role7_002 cascade; ''') logger.info(sql_cmd) self.assertIn(self.Constant.TABLE_DROP_SUCCESS, sql_cmd) logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT---------') logger.info('----------期望:resset成功, table_set_role7_002属主为role7_001,查询SESSION_USER和CURRENT_USER为role7_001---------')", "DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd = (''' SET SESSION AUTHORIZATION DEFAULT; SELECT SESSION_USER, CURRENT_USER; ''')", "privileges to role7_002; alter role role7_001 with login; alter role role7_002 with login;", "cascade;\" \"drop role if exists role7_001, role7_002;\" \"drop group if exists group7;\") logger.info(sql_cmd)", "ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY", "session AUTHORIZATION role7_002 password '{<PASSWORD>}'; create table table_set_role7_001(id int); SET SESSION AUTHORIZATION DEFAULT;", "(f''' begin SET local session AUTHORIZATION role7_002 password '{<PASSWORD>}'; create table table_set_role7_001(id int);", "\"AS IS\" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING", "role if exists role7_002; create role role7_001 password '{<PASSWORD>}'; create role role7_002 password", "self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\", msg) def tearDown(self): logger.info('---------------------------------清理环境。期望:删除用户成功-----------------------------------') sql_cmd = self.commonsh.execut_db_sql(\"drop table", "You may obtain a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE", "sql_cmd = (f''' 
SET SESSION AUTHORIZATION role7_002 password '{macro.COMMON_PASSWD}'; SELECT SESSION_USER, CURRENT_USER; ''')", "int); SET SESSION AUTHORIZATION DEFAULT; create table table_set_role7_002(id int); end; SELECT SESSION_USER, CURRENT_USER;", "table table_set_role7_002(id int); end; SELECT SESSION_USER, CURRENT_USER; select tableowner from pg_tables where tablename", "AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001 5.使用初始用户连gsql,删除表 期望:删除成功 6.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询table_set_role7_001属主为role7_002 table_set_role7_002属主为role7_001,查 7.使用初始用户连gsql,清理环境。期望:删除用户成功 Expect", "self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\", msg) logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001---------')", "self.commonsh.execut_db_sql(f''' drop table if exists table_set_role7_001 cascade; drop table if exists table_set_role7_002 cascade;", "self.assertIn(self.Constant.ALTER_ROLE_SUCCESS_MSG, sql_cmd) logger.info('-----------使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001-------------') sql_cmd = (''' SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd =", "= self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\", msg) logger.info('-------------------使用role7_001连接gsql,查询table_set_role7_001属主为role7_002---------------') sql_cmd = (''' select tableowner", "the terms and conditions of the Mulan PSL v2. 
You may obtain a", "-c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\",", "def tearDown(self): logger.info('---------------------------------清理环境。期望:删除用户成功-----------------------------------') sql_cmd = self.commonsh.execut_db_sql(\"drop table \" \"if exists table_set_role7_001 cascade;\" \"drop", "and conditions of the Mulan PSL v2. You may obtain a copy of", "table_set_role7_001 cascade;\" \"drop table if exists table_set_role7_002 cascade;\" \"drop role if exists role7_001,", "select tableowner from pg_tables where tablename ='table_set_role7_001'; ''') excute_cmd = f''' source {self.DB_ENV_PATH};", "CURRENT_USER均为role7_001-------------') sql_cmd = (''' SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd = f''' source {self.DB_ENV_PATH};", "SESSION_USER, CURRENT_USER; select tableowner from pg_tables where tablename ='table_set_role7_002'; ''') excute_cmd = f'''", "testcase.utils.CommonSH import CommonSH logger = Logger() class Privategrant(unittest.TestCase): def setUp(self): logger.info('------------------------Opengauss_Function_Set_Session_Authorization_Case0007开始执行-----------------------------') self.userNode =", "logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\", msg) def tearDown(self): logger.info('---------------------------------清理环境。期望:删除用户成功-----------------------------------') sql_cmd", "= (''' SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d", "conditions of the Mulan PSL v2. 
You may obtain a copy of Mulan", "self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\", msg) logger.info('------------------------删除表 期望:删除成功-----------------------------') sql_cmd = self.commonsh.execut_db_sql(''' drop table if exists", "3.使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002 4.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001 5.使用初始用户连gsql,删除表 期望:删除成功 6.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION", "Privategrant(unittest.TestCase): def setUp(self): logger.info('------------------------Opengauss_Function_Set_Session_Authorization_Case0007开始执行-----------------------------') self.userNode = Node('dbuser') self.DB_ENV_PATH = macro.DB_ENV_PATH self.Constant = Constant()", "tablename ='table_set_role7_001'; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d {self.userNode.db_name} -p {self.userNode.db_port}", "msg) self.assertIn(\"role7_002\", msg) def tearDown(self): logger.info('---------------------------------清理环境。期望:删除用户成功-----------------------------------') sql_cmd = self.commonsh.execut_db_sql(\"drop table \" \"if exists", "sql_cmd) self.assertIn(self.Constant.ALTER_ROLE_SUCCESS_MSG, sql_cmd) logger.info('-----------使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001-------------') sql_cmd = (''' SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd", "SESSION_USER, CURRENT_USER; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d {self.userNode.db_name} -p {self.userNode.db_port}", "setUp(self): logger.info('------------------------Opengauss_Function_Set_Session_Authorization_Case0007开始执行-----------------------------') self.userNode = Node('dbuser') self.DB_ENV_PATH = macro.DB_ENV_PATH self.Constant = Constant() self.commonsh =", "exists table_set_role7_001 cascade; drop table if exists table_set_role7_002 cascade; ''') logger.info(sql_cmd) self.assertIn(self.Constant.TABLE_DROP_SUCCESS, sql_cmd)", 
"4.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001 5.使用初始用户连gsql,删除表 期望:删除成功 6.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询table_set_role7_001属主为role7_002 table_set_role7_002属主为role7_001,查", "''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"role7_001\", msg) logger.info('----------使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002---------') sql_cmd =", "期望:删除成功-----------------------------') sql_cmd = self.commonsh.execut_db_sql(''' drop table if exists table_set_role7_001 cascade; drop table if", "Mulan PSL v2. You can use this software according to the terms and", "more details. \"\"\" \"\"\" Case Type : 功能测试 Case Name : set AUTHORIZATION", "= Constant() self.commonsh = CommonSH('dbuser') def test_common_user_permission(self): logger.info('------------------------创建用户,给用户赋权为sysadmin,期望:创建赋权成功-----------------------------') sql_cmd = self.commonsh.execut_db_sql(f''' drop table", "sql_cmd = self.commonsh.execut_db_sql(\"drop table \" \"if exists table_set_role7_001 cascade;\" \"drop table if exists", "PURPOSE. See the Mulan PSL v2 for more details. 
\"\"\" \"\"\" Case Type", "\"\"\" Case Type : 功能测试 Case Name : set AUTHORIZATION DEFAULT Description :", "msg) self.assertIn(\"role7_001\", msg) logger.info('-------------------使用role7_001连接gsql,查询table_set_role7_001属主为role7_002---------------') sql_cmd = (''' select tableowner from pg_tables where tablename", "DEFAULT; create table table_set_role7_002(id int); end; SELECT SESSION_USER, CURRENT_USER; select tableowner from pg_tables", "yat.test import Node sys.path.append(sys.path[0]+\"/../\") from testcase.utils.Logger import Logger from testcase.utils.Constant import Constant from", "ON AN \"AS IS\" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR", "set AUTHORIZATION DEFAULT Description : 1.使用初始用户连gsql,创建用户,给用户赋登录权限,期望:创建赋权成功 2.使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001 3.使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002 4.使用role7_001连接gsql,执行SET SESSION", "-c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\", msg) def", "this software according to the terms and conditions of the Mulan PSL v2.", "{self.userNode.db_port} -U role7_001 -W {macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg)", "role7_001 -W {macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_002\", msg)", "self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\", msg) logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd =", "tableowner from pg_tables where tablename ='table_set_role7_002'; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql", "self.userNode.sh(excute_cmd).result() logger.info(msg) 
self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\", msg) logger.info('-------------------使用role7_001连接gsql,查询table_set_role7_001属主为role7_002---------------') sql_cmd = (''' select tableowner from", "role7_001; ''') logger.info(sql_cmd) self.assertIn(self.Constant.DROP_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.ALTER_ROLE_SUCCESS_MSG, sql_cmd) logger.info('-----------使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001-------------') sql_cmd =", "(f''' SET SESSION AUTHORIZATION role7_002 password '{macro.COMMON_PASSWD}'; SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd =", "all privileges to role7_002; alter role role7_001 with login; alter role role7_002 with", "v2. You may obtain a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS", "logger.info(msg) self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\", msg) def tearDown(self): logger.info('---------------------------------清理环境。期望:删除用户成功-----------------------------------') sql_cmd = self.commonsh.execut_db_sql(\"drop table \"", "unittest from yat.test import macro from yat.test import Node sys.path.append(sys.path[0]+\"/../\") from testcase.utils.Logger import", "6.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询table_set_role7_001属主为role7_002 table_set_role7_002属主为role7_001,查 7.使用初始用户连gsql,清理环境。期望:删除用户成功 Expect : History : \"\"\" import", "-U role7_001 -W {macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"SET\",", "end; SELECT SESSION_USER, CURRENT_USER; select tableowner from pg_tables where tablename ='table_set_role7_002'; ''') excute_cmd", "exists table_set_role7_002 cascade;\" \"drop role if exists role7_001, role7_002;\" \"drop group if exists", "import CommonSH logger = Logger() class Privategrant(unittest.TestCase): def setUp(self): 
logger.info('------------------------Opengauss_Function_Set_Session_Authorization_Case0007开始执行-----------------------------') self.userNode = Node('dbuser')", "if exists table_set_role7_001 cascade; drop table if exists table_set_role7_002 cascade; drop role if", "{macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\", msg)", "logger.info('------------------------创建用户,给用户赋权为sysadmin,期望:创建赋权成功-----------------------------') sql_cmd = self.commonsh.execut_db_sql(f''' drop table if exists table_set_role7_001 cascade; drop table if", "= (''' select tableowner from pg_tables where tablename ='table_set_role7_001'; ''') excute_cmd = f'''", "is licensed under Mulan PSL v2. You can use this software according to", "self.commonsh.execut_db_sql(''' drop table if exists table_set_role7_001 cascade; drop table if exists table_set_role7_002 cascade;", "-d {self.userNode.db_name} -p {self.userNode.db_port} -U role7_001 -W {macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg", "msg) logger.info('----------使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002---------') sql_cmd = (f''' SET SESSION AUTHORIZATION role7_002 password '{macro.COMMON_PASSWD}';", "for more details. \"\"\" \"\"\" Case Type : 功能测试 Case Name : set", "2022 Huawei Technologies Co.,Ltd. openGauss is licensed under Mulan PSL v2. 
You can", "PROVIDED ON AN \"AS IS\" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS", "msg) self.assertIn(\"role7_002\", msg) logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd = (''' SET SESSION", "http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN \"AS IS\" BASIS, WITHOUT WARRANTIES OF", "-c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\",", "= (f''' begin SET local session AUTHORIZATION role7_002 password '{<PASSWORD>}'; create table table_set_role7_001(id", "role7_001 -W {macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"SET\", msg)", "logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd = (''' SET SESSION AUTHORIZATION DEFAULT; SELECT", "SET SESSION AUTHORIZATION DEFAULT; create table table_set_role7_002(id int); end; SELECT SESSION_USER, CURRENT_USER; select", "of the Mulan PSL v2. 
You may obtain a copy of Mulan PSL", "CURRENT_USER均为role7_001 3.使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002 4.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001 5.使用初始用户连gsql,删除表 期望:删除成功 6.使用role7_001连接gsql,执行SET SESSION", "5.使用初始用户连gsql,删除表 期望:删除成功 6.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询table_set_role7_001属主为role7_002 table_set_role7_002属主为role7_001,查 7.使用初始用户连gsql,清理环境。期望:删除用户成功 Expect : History :", "logger.info('---------------------------------清理环境。期望:删除用户成功-----------------------------------') sql_cmd = self.commonsh.execut_db_sql(\"drop table \" \"if exists table_set_role7_001 cascade;\" \"drop table if", "Expect : History : \"\"\" import sys import unittest from yat.test import macro", "table_set_role7_001 cascade; drop table if exists table_set_role7_002 cascade; drop role if exists role7_001;", "role7_002; alter role role7_001 with login; alter role role7_002 with login; grant all", "cascade; drop role if exists role7_001; drop role if exists role7_002; create role", "details. 
\"\"\" \"\"\" Case Type : 功能测试 Case Name : set AUTHORIZATION DEFAULT", "source {self.DB_ENV_PATH}; gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U role7_001 -W {macro.COMMON_PASSWD} -c \"{sql_cmd}\"", "SESSION AUTHORIZATION role7_002 password '{macro.COMMON_PASSWD}'; SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd = f''' source", "import sys import unittest from yat.test import macro from yat.test import Node sys.path.append(sys.path[0]+\"/../\")", "logger.info('------------------------Opengauss_Function_Set_Session_Authorization_Case0007开始执行-----------------------------') self.userNode = Node('dbuser') self.DB_ENV_PATH = macro.DB_ENV_PATH self.Constant = Constant() self.commonsh = CommonSH('dbuser')", "INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.", "\"\"\" \"\"\" Case Type : 功能测试 Case Name : set AUTHORIZATION DEFAULT Description", "role role7_002 password '{<PASSWORD>}'; grant all privileges to role7_002; alter role role7_001 with", "= self.commonsh.execut_db_sql(\"drop table \" \"if exists table_set_role7_001 cascade;\" \"drop table if exists table_set_role7_002", "Mulan PSL v2 for more details. \"\"\" \"\"\" Case Type : 功能测试 Case", "AUTHORIZATION role7_002 password '{macro.COMMON_PASSWD}'; SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd = f''' source {self.DB_ENV_PATH};", "def setUp(self): logger.info('------------------------Opengauss_Function_Set_Session_Authorization_Case0007开始执行-----------------------------') self.userNode = Node('dbuser') self.DB_ENV_PATH = macro.DB_ENV_PATH self.Constant = Constant() self.commonsh", "Co.,Ltd. openGauss is licensed under Mulan PSL v2. You can use this software", "v2. 
You can use this software according to the terms and conditions of", "SELECT SESSION_USER, CURRENT_USER; select tableowner from pg_tables where tablename ='table_set_role7_002'; ''') excute_cmd =", "{self.DB_ENV_PATH}; gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U role7_001 -W {macro.COMMON_PASSWD} -c \"{sql_cmd}\" '''", "Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN \"AS IS\"", "table_set_role7_001 cascade; drop table if exists table_set_role7_002 cascade; ''') logger.info(sql_cmd) self.assertIn(self.Constant.TABLE_DROP_SUCCESS, sql_cmd) logger.info('----------使用role7_001连接gsql,执行SET", "''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U role7_001", "of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN \"AS", "(''' SET SESSION AUTHORIZATION DEFAULT; SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd = f''' source", "where tablename ='table_set_role7_002'; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d {self.userNode.db_name} -p", "self.commonsh = CommonSH('dbuser') def test_common_user_permission(self): logger.info('------------------------创建用户,给用户赋权为sysadmin,期望:创建赋权成功-----------------------------') sql_cmd = self.commonsh.execut_db_sql(f''' drop table if exists", "logger.info('----------期望:resset成功, table_set_role7_002属主为role7_001,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd = (f''' begin SET local session AUTHORIZATION role7_002 password '{<PASSWORD>}';", "NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2", "cascade; drop table if exists table_set_role7_002 cascade; drop role if exists role7_001; drop", "Copyright (c) 2022 Huawei Technologies Co.,Ltd. 
openGauss is licensed under Mulan PSL v2.", "AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd = (''' SET SESSION AUTHORIZATION DEFAULT; SELECT SESSION_USER, CURRENT_USER;", "login; alter role role7_002 with login; grant all privileges to role7_001; ''') logger.info(sql_cmd)", "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR", "\"drop role if exists role7_001, role7_002;\" \"drop group if exists group7;\") logger.info(sql_cmd) logger.info('-------------------------Opengauss_Function_Set_Session_Authorization_Case0007执行结束---------------------------')", "DEFAULT---------') logger.info('----------期望:resset成功, table_set_role7_002属主为role7_001,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd = (f''' begin SET local session AUTHORIZATION role7_002 password", "role7_002; create role role7_001 password '{<PASSWORD>}'; create role role7_002 password '{<PASSWORD>}'; grant all", "msg) logger.info('------------------------删除表 期望:删除成功-----------------------------') sql_cmd = self.commonsh.execut_db_sql(''' drop table if exists table_set_role7_001 cascade; drop", "SET SESSION AUTHORIZATION role7_002 password '{macro.COMMON_PASSWD}'; SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd = f'''", "''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\", msg) logger.info('------------------------删除表", "EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT", "sys.path.append(sys.path[0]+\"/../\") from testcase.utils.Logger import Logger from testcase.utils.Constant import Constant from testcase.utils.CommonSH import CommonSH", "logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\", msg) 
logger.info('-------------------使用role7_001连接gsql,查询table_set_role7_001属主为role7_002---------------') sql_cmd = ('''", "logger.info(msg) self.assertIn(\"SET\", msg) self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\", msg) logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd", "msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\", msg) logger.info('-------------------使用role7_001连接gsql,查询table_set_role7_001属主为role7_002---------------') sql_cmd = (''' select", "cascade; drop table if exists table_set_role7_002 cascade; ''') logger.info(sql_cmd) self.assertIn(self.Constant.TABLE_DROP_SUCCESS, sql_cmd) logger.info('----------使用role7_001连接gsql,执行SET SESSION", "DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001 5.使用初始用户连gsql,删除表 期望:删除成功 6.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询table_set_role7_001属主为role7_002 table_set_role7_002属主为role7_001,查 7.使用初始用户连gsql,清理环境。期望:删除用户成功 Expect :", "privileges to role7_001; ''') logger.info(sql_cmd) self.assertIn(self.Constant.DROP_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd) self.assertIn(self.Constant.ALTER_ROLE_SUCCESS_MSG, sql_cmd) logger.info('-----------使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001-------------')", "testcase.utils.Constant import Constant from testcase.utils.CommonSH import CommonSH logger = Logger() class Privategrant(unittest.TestCase): def", "if exists table_set_role7_002 cascade; ''') logger.info(sql_cmd) self.assertIn(self.Constant.TABLE_DROP_SUCCESS, sql_cmd) logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT---------') logger.info('----------期望:resset成功,", "self.assertIn(\"role7_001\", msg) logger.info('------------------------删除表 期望:删除成功-----------------------------') sql_cmd = 
self.commonsh.execut_db_sql(''' drop table if exists table_set_role7_001 cascade;", "self.assertIn(self.Constant.TABLE_DROP_SUCCESS, sql_cmd) logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT---------') logger.info('----------期望:resset成功, table_set_role7_002属主为role7_001,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd = (f''' begin SET", "can use this software according to the terms and conditions of the Mulan", "gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U role7_001 -W {macro.COMMON_PASSWD} -c \"{sql_cmd}\" ''' logger.info(excute_cmd)", "pg_tables where tablename ='table_set_role7_002'; ''') excute_cmd = f''' source {self.DB_ENV_PATH}; gsql -d {self.userNode.db_name}", "CommonSH logger = Logger() class Privategrant(unittest.TestCase): def setUp(self): logger.info('------------------------Opengauss_Function_Set_Session_Authorization_Case0007开始执行-----------------------------') self.userNode = Node('dbuser') self.DB_ENV_PATH", "Description : 1.使用初始用户连gsql,创建用户,给用户赋登录权限,期望:创建赋权成功 2.使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001 3.使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002 4.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001", "OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,", "= self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\", msg) def tearDown(self): logger.info('---------------------------------清理环境。期望:删除用户成功-----------------------------------') sql_cmd = self.commonsh.execut_db_sql(\"drop", "msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertIn(\"role7_001\", msg) logger.info('----------使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002---------') sql_cmd = (f''' SET", "sql_cmd = (''' select tableowner from 
pg_tables where tablename ='table_set_role7_001'; ''') excute_cmd =", "''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\", msg) def tearDown(self): logger.info('---------------------------------清理环境。期望:删除用户成功-----------------------------------')", "= macro.DB_ENV_PATH self.Constant = Constant() self.commonsh = CommonSH('dbuser') def test_common_user_permission(self): logger.info('------------------------创建用户,给用户赋权为sysadmin,期望:创建赋权成功-----------------------------') sql_cmd =", "sql_cmd) logger.info('-----------使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001-------------') sql_cmd = (''' SELECT SESSION_USER, CURRENT_USER; ''') excute_cmd = f'''", "\"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_001\", msg) self.assertIn(\"role7_002\", msg) def tearDown(self):", "use this software according to the terms and conditions of the Mulan PSL", "期望:删除成功 6.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询table_set_role7_001属主为role7_002 table_set_role7_002属主为role7_001,查 7.使用初始用户连gsql,清理环境。期望:删除用户成功 Expect : History : \"\"\"", "logger.info(sql_cmd) self.assertIn(self.Constant.TABLE_DROP_SUCCESS, sql_cmd) logger.info('----------使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT---------') logger.info('----------期望:resset成功, table_set_role7_002属主为role7_001,查询SESSION_USER和CURRENT_USER为role7_001---------') sql_cmd = (f''' begin", "DEFAULT Description : 1.使用初始用户连gsql,创建用户,给用户赋登录权限,期望:创建赋权成功 2.使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001 3.使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002 4.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT", "to the terms and conditions of the Mulan PSL v2. 
You may obtain", "\"{sql_cmd}\" ''' logger.info(excute_cmd) msg = self.userNode.sh(excute_cmd).result() logger.info(msg) self.assertNotIn(\"role7_002\", msg) self.assertIn(\"role7_001\", msg) logger.info('-------------------使用role7_001连接gsql,查询table_set_role7_001属主为role7_002---------------') sql_cmd", "DEFAULT 期望:resset成功,查询table_set_role7_001属主为role7_002 table_set_role7_002属主为role7_001,查 7.使用初始用户连gsql,清理环境。期望:删除用户成功 Expect : History : \"\"\" import sys import unittest", "table if exists table_set_role7_002 cascade; drop role if exists role7_001; drop role if" ]
[ "of obtained RMSD values \"\"\" RMSDReductions = [] overlap = [] MTM =", "close or the mode vectors are problematic\" RMSD_after_Tapprox = previousRMSD # calc overlap", "status: \", modesToConsider, status elif not preconceived: initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) betas, status", "proteinFrom.select('segment \\\"L.\\\"') proteinToL = proteinTo.select('segment \\\"L.\\\"') return calcRMSD(proteinFromL, proteinToL) else: # else it", "ref_chain, mob_chain, defvec): \"\"\" Calculate a list of RMSD reductions based increasing number", "i, \"using previous betas\" with open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at", "1: Mbefore = np.array(np.dstack(arrCopy)[0][0]) M = np.zeros((len(Mbefore), 1)) #print \"M: \", M for", "betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes, preconceived=True) except TimeoutError: print \"RMSD", "mode vectors are problematic\" if previousOverlap: currentOverlap = previousOverlap else: currentOverlap = 0", "overall matched chains of the protein which is being deformed towards previousBetas: The", "list of number of modes to successively calculate the RMSD reductions on referenceName:", "the name of the reference, for output debugging purposes filePrefix: file prefix, for", "from the bound structure defvec: the deformation vector stepPointsReduction: list of number of", "from prody.dynamics.compare import calcOverlap from prody.dynamics.mode import Vector from prody.measure.transform import calcRMSD from", "fitter timeouts Returns: RMSDReduction \"\"\" Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) try:", "\", modesToConsider, nonTrivialModes, \"status == \", status, \" skipped\" return initialGuess print \"modesToConsider,", "ANM modes.\"\"\" M = anm.getArray() Mtrans = M.T MTM = np.dot(Mtrans, M) return", "the RMSD reductions on referenceName: the name of the 
reference, for output debugging", "print \"RMSD_after_Tapprox has a numerical problem, maybe the two structures are already too", "[] L_RMSReductions = [] overlap = [] numModes = Marray.shape[1] Mtrans = Marray.T", "anm_slc, excludeFirstK, indicesOfHighest): \"\"\" Create an array of np.arrays with the modes specified", "proteinTo) # RMSD comparison if previousRMSD: if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox) or previousRMSD <", "0 return RMSD_after_Tapprox, currentOverlap, betas @timeout() def obtainLstSqBetas(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas,", "stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, numModes, initialStep=1) print \"stepPointsReduction: \", stepPointsReduction guard = 0", "mob_chain.copy() mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) L_RMSD_after_Tapprox = self.getL_RMS(mob_chain_copy, ref_chain, self.utils.config.investigationsOn)", "length \"\"\" initialGuess = listofPreviousBetas initialGuess = np.append(initialGuess, [x*0.0 for x in range(len(initialGuess),", "cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] # elif not preconceived: # initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider)", "cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] else: betas, status = cg(MTM, np.dot(Mtrans, Tdefvec),", "in indicesOfHighest\"\"\" excludeFirstK = range(0, excludeFirstK) M = anm_slc[excludeFirstK[0]].getArray() #print \"initial M: \",", "overall matched chain atoms from the bound structure defvec: the deformation vector Returns:", "# continue if guard < self.utils.config.guard: # calculate betas try: betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T,", "same shape as getArray from an ANM object proteinFrom: The overall matched chains", "status = cg(MTM, np.dot(Mtrans, Tdefvec), 
x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # print \"modesToConsider, status: \",", "RMSD between proteins. ''' def __init__(self, utils): ''' Constructor ''' self.utils = utils", "for i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if i > self.utils.config.stopRMSDReductionAt or i >", "modesToConsider, status else: # how many modes could be calculated on this structure", "Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2] if status != 0: print \"modesToConsider, nonTrivialModes, status:", "nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined):", "RMSD fitter timeouts filePrefix: filePrefix, for output debugging if the RMSD fitter timeouts", "i # raw_input() # continue if guard < self.utils.config.guard: # calculate betas try:", "MTM = np.dot(Mtrans, Marray) try: betas = self.obtainLstSqBetasGeneralized2(Marray, defvec, MTM) except TimeoutError: print", "the betas Returns: The initial guess vector for the betas, padded with 0.0", "shape as getArray from an ANM object ref_chain: The overall matched chain atoms", "modes \" +str(i)+\" using previous betas\\n \") betas = self.getInitialGuessExpanding(betasListWhole, i, numModes) Tapprox", "< L_RMSReductions[-1]: L_RMSReductions.append(L_RMSD_after_Tapprox) else: print \"previous L_RMS lower at \", i # else", "\"\"\" RMSDReductions = [] overlap = [] MTM = self.setupMTMforBetas(anm_slc[0]) betasListWhole = []", "= ref_chain.copy() ref_chain_copy.setCoords(ref_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]): #", "mob_chain_copy = mob_chain.copy() 
mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) if RMSDReductions: if", "\", stepPointsReduction guard = 0 for i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if i", "was not successful betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else: # else it is the first", "matched chain atoms from the bound structure defvec: the deformation vector Returns: RMSDReductions:", "stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if i > self.utils.config.stopRMSDReductionAt or i > numModes: # temporary,", "previous betas\" with open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at modes \"", "< RMSD_after_Tapprox: print \"RMSD_after_Tapprox has a numerical problem, maybe the two structures are", "np.dot(Mtrans, Tdefvec) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] print \"modesToConsider, status: \",", "+str(i)+\" using previous betas\\n \") betas = self.getInitialGuess(betasListWhole, i) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T)", "numModes = Marray.shape[1] Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) stepPointsReduction = stepPointsReduction", "# doi:10.3390/ijms11103623. 
# # Args: # anm: the ANM with modes # defvec:", "filePrefix): \"\"\" Calculate a list of RMSD reductions based increasing number of modes,", "self.utils.config.stopRMSDReductionAt: # temporary, to speedup other calculations continue if guard < self.utils.config.guard: #", "matrix inverse times the ANM matrix previousBetas: previously calculated betas Returns: the beta", "\",j,\" M: \", M return M[0] def getModeArrayKeepingFirstK(self, arr, k): k += 1", "np.shape(M) # if len(M) != len(Tdefvec): # raise ValueError(\"Cannot calculate betas, len(M) !=", "RMSD fitter timeouts Returns: RMSDReduction \"\"\" Mtrans = Marray.T MTM = np.dot(Mtrans, Marray)", "in range(len(initialGuess), modesToConsider)]) return initialGuess def calcRMSDReductionsAidedByCollectivity(self, collectivity, highestN, excludeFirstK, anm_slc, ref_chain, mob_chain):", "else: # else guard is >= self.utils.config.guard, and the RMSD reduction should go", "run, store L_RMS reduction results initial_L_RMS = self.getL_RMS(mob_chain, ref_chain, self.utils.config.investigationsOn) if L_RMSD_after_Tapprox <", "0.0 to reach the correct length \"\"\" initialGuess = listofPreviousBetas[-1] initialGuess = np.append(initialGuess,", "store L_RMS reduction results initial_L_RMS = self.getL_RMS(mob_chain, ref_chain, self.utils.config.investigationsOn) if L_RMSD_after_Tapprox < initial_L_RMS:", "towards the matched atoms ref_chain: The overall matched chain atoms from the unbound", "i,\"using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at modes", "deformation vector Returns: RMSDReductions: The reduction list of obtained RMSD values \"\"\" RMSDReductions", "eigenvectors towards the matched atoms ref_chain: The overall matched chain atoms from the", "Marray.T MTM = np.dot(Mtrans, Marray) try: betas = self.obtainLstSqBetasGeneralized2(Marray, defvec, MTM) except TimeoutError:", "modes could be 
calculated on this structure nonTrivialModes = maxModes #(maxModes[1].select('calpha').numAtoms()*3) - 6", "# \"\"\" # ### old # ### M = anm.getArray() # # Tdefvec", "prody.dynamics.mode import Vector from prody.measure.transform import calcRMSD from scipy.sparse.linalg import cg from timeout", "and the Use of Normal # Modes in Protein-Protein Docking.\" International Journal of", "Molecular Sciences 11, no. 10 (September 28, 2010): 3623-3648. doi:10.3390/ijms11103623. Args: M: the", "myfile: # myfile.write(referenceName+\" RMSD timeout at modes \" +str(i)+\" using previous betas\\n \")", "np.shape(M) if len(M) != len(Tdefvec): raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") Mtrans", "RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) L_RMSD_after_Tapprox = self.getL_RMS(mob_chain_copy, ref_chain, self.utils.config.investigationsOn) deformationSnapshots[i] = mob_chain_copy.copy() if", "(anmTuple[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): if np.linalg.det(MTM)", "other calculations continue # calculate betas try: betas = self.obtainLstSqBetasGeneralizedExpanding(Marray.T[0:i+1].T, defvec, MTM, i,", "serves as part of the initial guess for the fitter previousOverlap: The previous", "anm_slc, preconceived=True) except TimeoutError: print \"RMSD timeout at modes\", i, \"using previous betas\"", "betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at modes \" +str(i)+\"", "0 to n-1 listofPreviousBetas: the list of previously calculated betas maxModes: the number", "get the betas Returns: The initial guess vector for the betas, padded with", "\",i,\" M: \", M for j in range(0, len(indicesOfHighest)): M = np.dstack((M, anm_slc[indicesOfHighest[j]].getArray()))", "\"modesToConsider, nonTrivialModes, 
status: \", modesToConsider, nonTrivialModes, \"det(MTM) == 0, skipped\" return initialGuess betas,", "overall matched chains of the protein which is being deformed towards defvec: the", "i,\"using previous betas\" with open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at modes", "+ TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(proteinFrom_copy, proteinTo) # RMSD comparison if previousRMSD: if np.isnan(RMSD_after_Tapprox)", "28, 2010): 3623-3648. doi:10.3390/ijms11103623. Args: anm: the ANM with modes defvec: the deformationvector", "reached or not Returns: the beta coefficents \"\"\" M = anm.getArray() #print \"first", "defvec: the deformation vector MTM: dot product of the ANM matrix inverse times", "investigationsOn): \"\"\" Get the L_RMS of proteinFrom and proteinTo (they need to be", "\"overlap has a numerical problem, maybe the two structures are already too close", "calcRMSD(ref_chain_copy, mob_chain) if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]: # store betas and RMSD", "# temporary, to speedup other calculations continue if guard < self.utils.config.guard: # calculate", "cg from timeout import timeout from timeout import TimeoutError from collections import OrderedDict", "\"first mode did not lower L_RMS\" # cast objects overlap = np.array(overlap, dtype=np.float64)", "Mtrans = M.T # the default maxiter is too low, increase the number", "skipped\" # return initialGuess # print \"modesToConsider, status: \", modesToConsider, status # return", "too close or the mode vectors are problematic\" RMSD_after_Tapprox = previousRMSD # calc", "of the protein which is being deformed towards defvec: the deformation vector from", "for x in range(len(initialGuess), modesToConsider+1)]) return initialGuess def expandInitialGuess(self, listofPreviousBetas, modesToConsider): \"\"\" Create", "and proteinTo (they need to be chain matched). 
Args: proteinFrom: Deformed protein proteinTo:", "given in : Moal, <NAME>., and <NAME>. \"SwarmDock and the Use of Normal", "if modesToConsider < 1: #print \"original MTM, np.dot(Mtrans, Tdefvec) \", MTM, np.dot(Mtrans, Tdefvec)", "betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] # elif not preconceived: # initialGuess", "for the fitter previousOverlap: The previous overlap previousRMSD: The previous reduced RMSD defvec:", "not successful guard += 1 betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else: # else it is", "\", MTM, np.dot(Mtrans, Tdefvec) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] print \"modesToConsider,", "mode did not lower RMSD\" betasListWhole.append(betas) # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec)", "overlap.append(currentOverlap) if L_RMSReductions: if L_RMSD_after_Tapprox < L_RMSReductions[-1]: L_RMSReductions.append(L_RMSD_after_Tapprox) else: print \"previous L_RMS lower", "\", Marray[0:2] RMSDReductions = [] overlap = [] numModes = Marray.shape[1] #MTM =", "\"shape(M): \", np.shape(M) if len(M) != len(Tdefvec): print \"len(M): \", M.shape print \"len(Tdefvec):", "defvec, MTMfull, modesToConsider, listofPreviousBetas, anmTuple, preconceived=False): \"\"\" Obtain betas by a scipy optimizer", "self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM)", "status: \", modesToConsider, status # else: # # how many modes could be", "# the default maxiter is too low, increase the number maximalIter = self.utils.config.maxIterBetas", "= mob_chain.copy() mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]): #", 
"structure mob_chain: The overall matched chain atoms from the bound structure defvec: the", "np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] print \"modesToConsider, status: \", modesToConsider, status elif not preconceived: initialGuess", "L_RMSReductions = np.array(L_RMSReductions, dtype=np.float64) deformationSnapshots[\"proteinTo\"] = ref_chain.copy() return RMSDReductions, overlap, stepPointsReduction, L_RMSReductions, deformationSnapshots", "x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # print \"modesToConsider, status: \", modesToConsider, status # else: #", "a linear combination with betas. RMSD change from mob_chain to ref_chain Args: anm_slc:", "len(M) != len(Tdefvec)\") # Mtrans = M.T # MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use", "M[0] def getModeArrayKeepingFirstK(self, arr, k): k += 1 k = range(0, k) arrCopy", "mob_chain, defvec): \"\"\" Calculate a list of RMSD reductions based increasing number of", "Tapprox to a copy of the unbound structure and get the reduced RMSD", "Get the L_RMS of proteinFrom and proteinTo (they need to be chain matched).", "\"\"\" Calculate a list of RMSD reductions based increasing number of modes, that", "many modes could be calculated on this structure nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) - 6", "if overlap: overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap) else: print \"previous RMSD lower", "# return initialGuess # betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2]", "import calcOverlap from prody.dynamics.mode import Vector from prody.measure.transform import calcRMSD from scipy.sparse.linalg import", "ref_chain_copy.setCoords(ref_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain) if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]:", "full ANM 
matrix inverse times # the ANM matrix # modesToConsider: up to", "RMSDReductions = [] overlap = [] Mtrans = M.T MTM = np.dot(Mtrans, M)", "mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]:", "formula is given in : # # Moal, <NAME>., and <NAME>. \"SwarmDock and", "betas = self.obtainLstSqBetasGeneralized2(Marray, defvec, MTM) except TimeoutError: print \"RMSD timeout at modes\", Marray.shape[1],\"", "print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"det(MTM) == 0, skipped\" # return", "i, \"using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at", "RMSD timeout at modes \" +str(Marray.shape[1])+\" using previous betas\\n \") betas = self.getInitialGuess([0],", "\", status, \" skipped\" # return initialGuess # print \"modesToConsider, status: \", modesToConsider,", "def calcRMSDReductionsExpandingSet(self, Marray, ref_chain, mob_chain, defvec, stepPointsReduction, referenceName, filePrefix): \"\"\" Calculate a list", "temporary, to speedup other calculations continue # calculate betas try: betas = self.obtainLstSqBetasGeneralizedExpanding(Marray.T[0:i+1].T,", "0 else: previousOverlap = previousOverlap[-1] if len(previousRMSD) == 0: previousRMSD = calcRMSD(proteinFrom, proteinTo)", "#print \"shape(Tdefvec): \", np.shape(Tdefvec) #print \"shape(M): \", np.shape(M) if len(M) != len(Tdefvec): raise", "guess vector for the betas, padded with 0.0 to reach the correct length", "RMSD was actually lower, the beta calculation was not successful guard += 1", "if L_RMSD_after_Tapprox < initial_L_RMS: L_RMSReductions.append(L_RMSD_after_Tapprox) else: L_RMSReductions.append(initial_L_RMS) print \"first mode did not lower", "== len(k): return arr else: M = 
np.dstack(arrCopy)[0][0] #print \"first M in keep", "product of the full ANM matrix inverse times # the ANM matrix #", "return calcRMSD(proteinFrom, proteinTo) def calcRMSDReductionFromTo(self, Marray, proteinFrom, proteinTo, defvec, previousBetas, previousOverlap, previousRMSD, referenceName,", "Protein-Protein Docking.\" International Journal of Molecular Sciences 11, no. 10 (September 28, 2010):", "overall matched chain atoms from the bound structure defvec: the deformation vector referenceName:", "betas, len(M) != len(Tdefvec)\") Mtrans = M.T MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated", "temporary, to speedup other calculations continue if guard < self.utils.config.guard: # calculate betas", "0: print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"status == \", status, \"", "initialGuess def getInitialGuess(self, listofPreviousBetas, modesToConsider): \"\"\" Create an initial guess vector, padded with", "comparison if previousRMSD: if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox) or previousRMSD < RMSD_after_Tapprox: print \"RMSD_after_Tapprox", "deformation vector stepPointsReduction: list of number of modes to successively calculate the RMSD", "np.zeros((len(Mbefore), 1)) #print \"M: \", M for i in range(0, len(Mbefore)): M[i] =", "anm_slc[excludeFirstK[0]].getArray() #print \"initial M: \", M for i in range(1, len(excludeFirstK)): M =", "@timeout() def obtainLstSqBetas(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, anmTuple, preconceived=False): \"\"\" Obtain betas", "= cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # if status != 0: #", "a linear combination with betas. 
Args: anm_slc: The sliced ANM, with the corresponding", "RMSDReductions = [] overlap = [] MTM = self.setupMTMforBetas(anm_slc[0]) betasListWhole = [] stepPointsReduction", "Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # betas, status = lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting,", "status: \", M.shape[1], status return betas # def obtainLstSqBetasByCollectivity(self, M, defvec, MTMfull, modesToConsider,", "ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") Mtrans = M.T MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] #", ">= self.utils.config.guard, and the RMSD reduction should go preconceived # calculate betas Mmode", "proteinFromL = proteinFrom.select('segment \\\"L.\\\"') proteinToL = proteinTo.select('segment \\\"L.\\\"') return calcRMSD(proteinFromL, proteinToL) else: #", "to how many modes are given to get the betas Returns: The initial", "modes\", i,\"using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at", "modesToConsider: up to how many modes the betas should be calculated listofPreviousBetas: the", "\"overlap has a numerical problem\" currentOverlap = 0 overlap.append(currentOverlap) if L_RMSReductions: if L_RMSD_after_Tapprox", "len(k) == 1: Mbefore = np.array(np.dstack(arrCopy)[0][0]) M = np.zeros((len(Mbefore), 1)) #print \"M: \",", "(September 28, 2010): 3623-3648. doi:10.3390/ijms11103623. 
Args: anm: the ANM with modes defvec: the", "TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]: # store", "RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else: # else it is the first RMSD reduction run, store", "previousBetas[-1] if len(previousOverlap) == 0: previousOverlap = 0 else: previousOverlap = previousOverlap[-1] if", "timeout at modes\", i, \"using previous betas\" # with open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile:", "= M.T # MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM # maximalIter =", "betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # if status !=", "International Journal of Molecular Sciences 11, no. 10 (September 28, 2010): 3623-3648. doi:10.3390/ijms11103623.", "else: print \"previous L_RMS lower at \", i # else the previous LRMS", "anm Tdefvec = defvec.getArray() #print \"shape(Tdefvec): \", np.shape(Tdefvec) #print \"shape(M): \", np.shape(M) if", "reduction results initial_RMSD = calcRMSD(mob_chain, ref_chain) if RMSD_after_Tapprox < initial_RMSD: RMSDReductions.append(RMSD_after_Tapprox) else: RMSDReductions.append(initial_RMSD)", "= Vector(Tapprox, \"Tapprox\") # apply Tapprox to a copy of proteinFrom and get", "beta coefficents \"\"\" M = anm Tdefvec = defvec.getArray() #print \"shape(Tdefvec): \", np.shape(Tdefvec)", "in range(len(initialGuess), modesToConsider+1)]) if len(initialGuess) > maxModesOverall: initialGuess = initialGuess[:maxModesOverall] return initialGuess def", "Create an initial guess vector, padded with 0.0 values to the correct length.", "''' self.utils = utils def setupMTMforBetas(self, anm): \"\"\" Calculate and return the dot", "= M.T MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM maximalIter = 
self.utils.config.maxIterBetas if", "numModes) Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1]) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to", "11, no. 10 (September 28, 2010): 3623-3648. doi:10.3390/ijms11103623. Args: anm: the ANM with", "return RMSD_after_Tapprox, currentOverlap, betas def RMSDReductionFixedset(self, Marray, proteinFrom, proteinTo, defvec, referenceName, filePrefix): \"\"\"", "doi:10.3390/ijms11103623. Args: anm: the ANM with modes defvec: the deformationvector MTMfull: dot product", "ref_chain_copy = ref_chain.copy() ref_chain_copy.setCoords(ref_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]):", "the unbound structure mob_chain: The overall matched chain atoms from the bound structure", "betasListWhole, numModes) except TimeoutError: print \"RMSD timeout at modes\", i,\"using previous betas\" with", "\", status, \" skipped\" return initialGuess print \"modesToConsider, status: \", modesToConsider, status return", "initial guess for the fitter previousOverlap: The previous overlap previousRMSD: The previous reduced", "print \"first mode did not lower RMSD\" betasListWhole.append(betas) # calc overlap currentOverlap =", "calcDeformVector(ref_chain, mob_chain) RMSDReductions = [] overlap = [] Mtrans = M.T MTM =", ">= self.utils.config.guard, and the RMSD reduction should go preconceived # calculate betas try:", "else: currentOverlap = 0 overlap.append(currentOverlap) guard = 0 else: print \"previous RMSD lower", "deformation vector from proteinFrom to proteinTo referenceName: the name of the reference, for", "len(M) != len(Tdefvec)\") Mtrans = M.T # the default maxiter is too low,", "calculate betas, len(M) != len(Tdefvec)\") # Mtrans = M.T # MTM = MTMfull[:modesToConsider+1,:modesToConsider+1]", "= 0 return RMSD_after_Tapprox, currentOverlap, betas @timeout() def obtainLstSqBetas(self, anm, 
defvec, MTMfull, modesToConsider,", "Args: proteinFrom: Deformed protein proteinTo: Target protein (target of the deformation vector) investigationsON:", "towards defvec: the deformation vector from proteinFrom to proteinTo referenceName: the name of", "defvec: the deformation vector referenceName: the name of the reference Returns: RMSDReductions: The", "the following modes as given by the indices in indicesOfHighest\"\"\" excludeFirstK = range(0,", "= calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or np.isinf(currentOverlap): print \"overlap has a numerical problem\"", "prody.measure.transform import calcRMSD from scipy.sparse.linalg import cg from timeout import timeout from timeout", "### old # ### M = anm.getArray() # # Tdefvec = defvec.getArray() #", "could be calculated on this structure # nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) - 6 #", "if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) == -0.0: # print \"modesToConsider, nonTrivialModes, status:", "return initialGuess def getInitialGuess(self, listofPreviousBetas, modesToConsider): \"\"\" Create an initial guess vector, padded", "\", np.shape(Tdefvec) #print \"shape(M): \", np.shape(M) if len(M) != len(Tdefvec): print \"len(M): \",", "overlap previousRMSD: The previous reduced RMSD defvec: the deformation vector from proteinFrom to", "the formula is given in : # # Moal, <NAME>., and <NAME>. 
\"SwarmDock", "\"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at modes \" +str(i)+\" using previous betas\\n", "are already too close or the mode vectors are problematic\" RMSD_after_Tapprox = previousRMSD", "\"\"\" print \"anm_slc[0].getArray(): \", anm_slc[0][0:2].getArray().shape RMSDReductions = [] overlap = [] MTM =", "calculated betas Returns: the beta coefficents \"\"\" Tdefvec = defvec.getArray() if len(M) !=", "numModes, preconceived=True) except TimeoutError: print \"RMSD timeout at modes\", i, \"using previous betas\"", "initial_L_RMS = self.getL_RMS(mob_chain, ref_chain, self.utils.config.investigationsOn) if L_RMSD_after_Tapprox < initial_L_RMS: L_RMSReductions.append(L_RMSD_after_Tapprox) else: L_RMSReductions.append(initial_L_RMS) print", "defvec, stepPointsReduction, referenceName, filePrefix): \"\"\" Calculate a list of RMSD reductions based increasing", "if previousOverlap: currentOverlap = previousOverlap else: currentOverlap = 0 return RMSD_after_Tapprox, currentOverlap, betas", "modes the betas should be calculated # # Returns: # the beta coefficents", "= self.utils.getIndiciesofHighestN(np.abs(collectivity), highestN, excludeFirstK) M = self.getModeArrayBasedOnIndices(anm_slc[0], excludeFirstK, indicesOfHighest) defvec = calcDeformVector(ref_chain, mob_chain)", "i, betasListWhole, anm_slc) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply", "defvec) if np.isnan(currentOverlap) or np.isinf(currentOverlap): print \"overlap has a numerical problem\" if overlap:", "len(Tdefvec) raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") Mtrans = M.T # the", "Calculate a list of RMSD reductions based increasing number of modes, that are", "= self.utils.config.maxIterBetas if modesToConsider < 1: #print \"original MTM, np.dot(Mtrans, Tdefvec) \", MTM,", "if len(previousBetas) == 0: previousBetas = [0] else: previousBetas = previousBetas[-1] if 
len(previousOverlap)", "self.utils.getRMSDReductionStepPoints(10, 10, anm_slc[0].numModes()) guard = 0 for i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if", "np.isinf(RMSD_after_Tapprox) or previousRMSD < RMSD_after_Tapprox: print \"RMSD_after_Tapprox has a numerical problem, maybe the", "0 overlap.append(currentOverlap) else: print \"previous RMSD lower at \", i # else the", "1: betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] else: if previousBetas is not", "arr.copy() if len(k) == 1: Mbefore = np.array(np.dstack(arrCopy)[0][0]) M = np.zeros((len(Mbefore), 1)) #print", "cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # print \"modesToConsider, status: \", modesToConsider, status", "if investigationsOn == \"Complex\": proteinFromL = proteinFrom.select('segment \\\"L.\\\"') proteinToL = proteinTo.select('segment \\\"L.\\\"') return", "proteinFrom and proteinTo \"\"\" if investigationsOn == \"Complex\": proteinFromL = proteinFrom.select('segment \\\"L.\\\"') proteinToL", "scipy.sparse.linalg import cg from timeout import timeout from timeout import TimeoutError from collections", "Betas modesToConsider: up to how many modes are given to get the betas", "\"RMSD timeout at modes\", i,\"using previous betas\" # with open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile:", "actually lower, the beta calculation was not successful betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) # cast", "= calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or np.isinf(currentOverlap): print \"overlap has a numerical problem,", "+ TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain) if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]: #", "normal modes, same shape as getArray from an ANM object ref_chain: The overall", 
"Moal, <NAME>., and <NAME>. \"SwarmDock and the Use of Normal # Modes in", "on this structure nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) if", "== 0: previousOverlap = 0 else: previousOverlap = previousOverlap[-1] if len(previousRMSD) == 0:", "Use of Normal # Modes in Protein-Protein Docking.\" International Journal of # Molecular", "as getArray from an ANM object ref_chain: The overall matched chain atoms from", "\"Tapprox\") # apply Tapprox to a copy of the unbound structure and get", "prody.dynamics.compare import calcOverlap from prody.dynamics.mode import Vector from prody.measure.transform import calcRMSD from scipy.sparse.linalg", "\"\"\" Create an initial guess vector, padded with 0.0 values to the correct", "modesToConsider, maxModes) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # betas,", "self.utils.getRMSDReductionStepPoints(10, 10, numModes, initialStep=1) print \"stepPointsReduction: \", stepPointsReduction guard = 0 for i", "lower at \", i # else the previous LRMS was actually lower, the", "# betasListWhole.append(betasListWhole[-1]) # RMSDReductions.append(RMSDReductions[-1]) # overlap.append(overlap[-1]) # print \"already reached RMSD = 1", "= 0 overlap.append(currentOverlap) else: print \"previous RMSD lower at \", i # else", "else it is the first L_RMSD reduction run, store L_RMS reduction results initial_L_RMS", "is the first RMSD reduction run, no need to compare against previous RMSD", "x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] else: betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] print", "betas should be calculated, starting from 0 to n-1 listofPreviousBetas: the list of", "reduction list of obtained RMSD values \"\"\" #print \"Marray: \", 
Marray[0:2] RMSDReductions =", "skipped\" return initialGuess print \"modesToConsider, status: \", modesToConsider, status return betas @timeout() def", "None: initialGuess = self.expandInitialGuess(previousBetas, M.shape[1]) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter,", "len(Tdefvec)\") # Mtrans = M.T # MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM", "ANM object ref_chain: The overall matched chain atoms from the unbound structure mob_chain:", "RMSDReductions: The reduction list of obtained RMSD values \"\"\" print \"anm_slc[0].getArray(): \", anm_slc[0][0:2].getArray().shape", "the list of previously calculated betas maxModes: the number of modes preconceived: has", "''' from prody.measure.measure import calcDeformVector import numpy as np from prody.dynamics.compare import calcOverlap", "to proteinTo referenceName: the name of the reference, for output debugging if the", "#print \"Marray: \", Marray[0:2] RMSDReductions = [] overlap = [] numModes = Marray.shape[1]", "to ref_chain Args: Marray: Array of normal modes, same shape as getArray from", "\"RMSD timeout at modes\", i, \"using previous betas\" # with open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as", "RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsExpandingSet(self, Marray, ref_chain, mob_chain, defvec, stepPointsReduction, referenceName, filePrefix): \"\"\"", "defvec, MTM, i, betasListWhole, numModes) except TimeoutError: print \"RMSD timeout at modes\", i,\"using", "calculated # # Returns: # the beta coefficents # \"\"\" # ### old", "was not successful L_RMSReductions.append(L_RMSReductions[-1]) else: # else it is the first L_RMSD reduction", "combination with betas. RMSD change from mob_chain to ref_chain Args: Marray: Array of", "and the RMSD reduction should go preconceived # calculate betas try: betas =", "padded with 0.0 values to the correct length. 
Args: listofPreviousBetas: the list of", "= ref_chain.copy() ref_chain_copy.setCoords(ref_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain) if RMSDReductions: if RMSD_after_Tapprox", "and RMSD reduction results initial_RMSD = calcRMSD(mob_chain, ref_chain) if RMSD_after_Tapprox < initial_RMSD: RMSDReductions.append(RMSD_after_Tapprox)", "M = anm_slc[excludeFirstK[0]].getArray() #print \"initial M: \", M for i in range(1, len(excludeFirstK)):", "print \"highe \",j,\" M: \", M return M[0] def getModeArrayKeepingFirstK(self, arr, k): k", "was actually lower, the beta calculation was not successful betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) #", "a numerical problem\" currentOverlap = 0 overlap.append(currentOverlap) if L_RMSReductions: if L_RMSD_after_Tapprox < L_RMSReductions[-1]:", "two structures are already too close or the mode vectors are problematic\" RMSD_after_Tapprox", "previousRMSD < RMSD_after_Tapprox: print \"RMSD_after_Tapprox has a numerical problem, maybe the two structures", "\"previous L_RMS lower at \", i # else the previous LRMS was actually", "initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): if np.linalg.det(MTM) == 0.0 or", "= (anmTuple[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): if", "tol=self.utils.config.precisionBetaFitting)[0:2] # if status != 0: # print \"modesToConsider, nonTrivialModes, status: \", modesToConsider,", "\") betas = self.getInitialGuess(previousBetas, Marray.shape[1]) Tapprox = np.dot(betas, Marray.T) TapproxVector = Vector(Tapprox, \"Tapprox\")", "as myfile: myfile.write(referenceName+\" RMSD timeout at modes \" +str(i)+\" using previous betas\\n \")", "# 
RMSD comparison if previousRMSD: if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox) or previousRMSD < RMSD_after_Tapprox:", "= self.utils.getRMSDReductionStepPoints(10, 10, anm_slc[0].numModes()) guard = 0 for i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt:", "modesToConsider) # if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): # if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM)", "betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at modes \" +str(Marray.shape[1])+\"", "self.getModeArrayBasedOnIndices(anm_slc[0], excludeFirstK, indicesOfHighest) defvec = calcDeformVector(ref_chain, mob_chain) RMSDReductions = [] overlap = []", "status return betas @timeout() def obtainLstSqBetasGeneralized2(self, M, defvec, MTM, previousBetas=None): \"\"\" Obtain betas", "the beta calculation was not successful betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else: # else it", "\"modesToConsider, status: \", modesToConsider, status elif not preconceived: initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes)", "RMSD_after_Tapprox = calcRMSD(proteinFrom, proteinTo) # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap)", "= listofPreviousBetas initialGuess = np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider)]) return initialGuess", "mob_chain, defvec, referenceName, filePrefix): \"\"\" Calculate a list of RMSD reductions based increasing", "return M elif len(arr[0]) == len(k): return arr else: M = np.dstack(arrCopy)[0][0] #print", "0.0 or np.linalg.det(MTM) == -0.0: # print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes,", "betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i, betasListWhole, anm_slc) 
Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector", "+ TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]): # store betas and", "def getModeArrayKeepingFirstK(self, arr, k): k += 1 k = range(0, k) arrCopy =", "list of RMSD reductions based increasing number of modes, that are combined in", "ref_chain Args: Marray: Array of normal modes, same shape as getArray from an", "by a scipy optimizer fitting, the formula is given in : # #", "this structure # nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) - 6 # initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider)", "nonTrivialModes = maxModes #(maxModes[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) if modesToConsider >", "the corresponding entries of the eigenvectors towards the matched atoms ref_chain: The overall", "anm.getArray() #print \"first M original: \", M Tdefvec = defvec.getArray() #print \"shape(Tdefvec): \",", "np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2] print \"modesToConsider, status: \", modesToConsider, status else:", "= np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def getModeArrayBasedOnIndices(self,", "ANM, with the corresponding entries of the eigenvectors towards the matched atoms ref_chain:", "stepPointsReduction betasListWhole = [[0] * stepPointsReduction[0]] deformationSnapshots = OrderedDict() deformationSnapshots[\"proteinFrom\"] = mob_chain.copy() for", "if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): # if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) == -0.0:", "i, betasListWhole, anm_slc) except TimeoutError: print \"RMSD 
timeout at modes\", i,\"using previous betas\"", "reduction should go preconceived # calculate betas Mmode = self.getModeArrayKeepingFirstK(M, i) betas =", "calculated on this structure nonTrivialModes = maxModes #(maxModes[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuess(listofPreviousBetas,", "Returns: the beta coefficents \"\"\" M = anm Tdefvec = defvec.getArray() #print \"shape(Tdefvec):", "== \", status, \" skipped\" # return initialGuess # print \"modesToConsider, status: \",", "to be chain matched). Args: proteinFrom: Deformed protein proteinTo: Target protein (target of", "maxiter=maximalIter)[0:2] print \"modesToConsider, status: \", modesToConsider, status elif not preconceived: initialGuess = self.getInitialGuess(listofPreviousBetas,", "betas, padded with 0.0 to reach the correct length \"\"\" initialGuess = listofPreviousBetas", "raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") # Mtrans = M.T # MTM", "been reached or not Returns: the beta coefficents \"\"\" M = anm.getArray() #print", "RMSD comparison if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox): print \"RMSD_after_Tapprox has a numerical problem, maybe", "generated by Prody preconceived: has guard from config been reached or not Returns:", "of obtained RMSD values \"\"\" #print \"Marray: \", Marray[0:2] RMSDReductions = [] overlap", "bound structure defvec: the deformation vector stepPointsReduction: list of number of modes to", "\", modesToConsider, status return betas @timeout() def obtainLstSqBetasGeneralized2(self, M, defvec, MTM, previousBetas=None): \"\"\"", "atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2] if status != 0: print \"modesToConsider, nonTrivialModes, status: \",", "did not lower L_RMS\" # cast objects overlap = np.array(overlap, dtype=np.float64) RMSDReductions =", "lower, the beta calculation was not successful guard += 1 
betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1])", "deformationvector MTMfull: dot product of the full ANM matrix inverse times the ANM", "overlap = [] Mtrans = M.T MTM = np.dot(Mtrans, M) betasListWhole = []", "timeout at modes \" +str(Marray.shape[1])+\" using previous betas\\n \") betas = self.getInitialGuess(previousBetas, Marray.shape[1])", "else: M = np.dstack(arrCopy)[0][0] #print \"first M in keep first k: \", M", "atoms ref_chain: The overall matched chain atoms from the unbound structure mob_chain: The", "RMSD timeout at modes \" +str(i)+\" using previous betas\\n \") betas = self.getInitialGuessExpanding(betasListWhole,", "== \"Complex\": proteinFromL = proteinFrom.select('segment \\\"L.\\\"') proteinToL = proteinTo.select('segment \\\"L.\\\"') return calcRMSD(proteinFromL, proteinToL)", "a linear combination with betas. RMSD change from mob_chain to ref_chain Args: Marray:", "proteinToL = proteinTo.select('segment \\\"L.\\\"') return calcRMSD(proteinFromL, proteinToL) else: # else it is an", "the protein which is being deformed towards defvec: the deformation vector from proteinFrom", "from mob_chain to ref_chain Args: Marray: Array of normal modes, same shape as", "anm.getArray() # # Tdefvec = defvec.getArray() # #print \"shape(Tdefvec): \", np.shape(Tdefvec) # #print", "or the mode vectors are problematic, returning overlap 0\" currentOverlap = 0 return", "betas \"\"\" Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) if len(previousBetas) == 0:", "# #print \"shape(Tdefvec): \", np.shape(Tdefvec) # #print \"shape(M): \", np.shape(M) # if len(M)", "# Returns: # the beta coefficents # \"\"\" # ### old # ###", "of previously calculated Betas modesToConsider: up to how many modes are given to", "ref_chain, mob_chain): indicesOfHighest = self.utils.getIndiciesofHighestN(np.abs(collectivity), highestN, excludeFirstK) M = self.getModeArrayBasedOnIndices(anm_slc[0], excludeFirstK, indicesOfHighest) 
defvec", "mob_chain_copy.copy() if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]: # store betas and RMSD reduction", "proteins. ''' def __init__(self, utils): ''' Constructor ''' self.utils = utils def setupMTMforBetas(self,", "the correct length \"\"\" initialGuess = listofPreviousBetas[-1] initialGuess = np.append(initialGuess, [x*0.0 for x", "dot product of the full ANM matrix inverse times the ANM matrix modesToConsider:", "<NAME>. \"SwarmDock and the Use of Normal Modes in Protein-Protein Docking.\" International Journal", "- 6 # initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) # if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): #", "are problematic\" if previousOverlap: currentOverlap = previousOverlap else: currentOverlap = 0 return RMSD_after_Tapprox,", "range(0, len(Mbefore)): M[i] = Mbefore[i] return M elif len(arr[0]) == len(k): return arr", "LRMS was actually lower, the beta calculation was not successful L_RMSReductions.append(L_RMSReductions[-1]) else: #", "deformationSnapshots def getL_RMS(self, proteinFrom, proteinTo, investigationsOn): \"\"\" Get the L_RMS of proteinFrom and", "timeout import timeout from timeout import TimeoutError from collections import OrderedDict class RMSDReducer(object):", "name of the reference Returns: RMSDReductions: The reduction list of obtained RMSD values", "for output debugging purposes Returns: RMSDReductions: The reduction list of obtained RMSD values", "of # Molecular Sciences 11, no. 10 (September 28, 2010): 3623-3648. 
# doi:10.3390/ijms11103623.", "\") betas = self.getInitialGuessExpanding(betasListWhole, i, numModes) Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1]) TapproxVector = Vector(Tapprox,", "Deformed protein proteinTo: Target protein (target of the deformation vector) investigationsON: \"Complex\" or", "if len(k) == 1: Mbefore = np.array(np.dstack(arrCopy)[0][0]) M = np.zeros((len(Mbefore), 1)) #print \"M:", "RMSD_after_Tapprox = previousRMSD # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or", "already too close or the mode vectors are problematic\" if previousOverlap: currentOverlap =", "overlap = [] numModes = Marray.shape[1] #MTM = self.setupMTMforBetas(anm_slc[0]) Mtrans = Marray.T MTM", "proteinTo, defvec, previousBetas, previousOverlap, previousRMSD, referenceName, filePrefix): \"\"\" Calculate a list of RMSD", "#print \"shape(M): \", np.shape(M) # if len(M) != len(Tdefvec): # raise ValueError(\"Cannot calculate", "full ANM matrix inverse times the ANM matrix modesToConsider: up to how many", "RMSDReductions.append(initial_RMSD) print \"first mode did not lower RMSD\" betasListWhole.append(betas) # calc overlap currentOverlap", "in a linear combination with betas. 
RMSD change from mob_chain to ref_chain Args:", "Marray.T MTM = np.dot(Mtrans, Marray) stepPointsReduction = stepPointsReduction - 1 # reduce every", "the name of the reference, for output debugging if the RMSD fitter timeouts", "1: #print \"original MTM, np.dot(Mtrans, Tdefvec) \", MTM, np.dot(Mtrans, Tdefvec) betas, status =", "status: \", modesToConsider, status return betas @timeout() def obtainLstSqBetasGeneralized2(self, M, defvec, MTM, previousBetas=None):", "#print \"initial M: \", M for i in range(1, len(excludeFirstK)): M = np.dstack((M,", "to speedup other calculations continue # elif RMSDReductions and (RMSDReductions[-1] == 1): #", "of the full ANM matrix inverse times the ANM matrix modesToConsider: up to", "M for j in range(0, len(indicesOfHighest)): M = np.dstack((M, anm_slc[indicesOfHighest[j]].getArray())) # print \"highe", "np.isnan(currentOverlap) or np.isinf(currentOverlap): print \"overlap has a numerical problem\" currentOverlap = 0 overlap.append(currentOverlap)", "modes the betas should be calculated listofPreviousBetas: the list of previously calculated betas", "original RMSD\" RMSD_after_Tapprox = calcRMSD(proteinFrom, proteinTo) # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec)", "a scipy optimizer fitting, the formula is given in : # # Moal,", "modesToConsider) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # betas, status", "= calcRMSD(mob_chain, ref_chain) if RMSD_after_Tapprox < initial_RMSD: RMSDReductions.append(RMSD_after_Tapprox) else: RMSDReductions.append(initial_RMSD) print \"first mode", "betasListWhole.append(betasListWhole[-1]) # RMSDReductions.append(RMSDReductions[-1]) # overlap.append(overlap[-1]) # print \"already reached RMSD = 1 at", "the Use of Normal # Modes in Protein-Protein Docking.\" International Journal of #", "status return betas @timeout() def obtainLstSqBetasGeneralizedExpanding(self, anm, defvec, 
MTMfull, modesToConsider, listofPreviousBetas, maxModes, preconceived=False):", "# store betas and RMSD reduction results betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox) # calc overlap currentOverlap", "structure and get the reduced RMSD mob_chain_copy = mob_chain.copy() mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox", "not None: initialGuess = self.expandInitialGuess(previousBetas, M.shape[1]) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess,", "#print \"original MTM, np.dot(Mtrans, Tdefvec) \", MTM, np.dot(Mtrans, Tdefvec) betas, status = cg(MTM,", "self.getInitialGuess(betasListWhole, i) Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1]) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox", "previous betas, serves as part of the initial guess for the fitter previousOverlap:", "#print \"first M in keep first k: \", M for i in range(1,", "been reached or not Returns: the beta coefficents \"\"\" M = anm Tdefvec", "# how many modes could be calculated on this structure nonTrivialModes = maxModes", "\"Marray: \", Marray[0:2] RMSDReductions = [] overlap = [] numModes = Marray.shape[1] #MTM", "else it is an investigation on individual proteins, L_RMS does not apply, #", "myfile.write(referenceName+\" RMSD timeout at modes \" +str(i)+\" using previous betas\\n \") betas =", "prody.measure.measure import calcDeformVector import numpy as np from prody.dynamics.compare import calcOverlap from prody.dynamics.mode", "Mtrans = M.T MTM = np.dot(Mtrans, M) betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10,", "print \"len(Tdefvec): \", len(Tdefvec) raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") Mtrans =", "too close or the mode vectors are problematic, returning original RMSD\" RMSD_after_Tapprox =", "stepPointsReduction, referenceName, filePrefix): \"\"\" Calculate a list of RMSD reductions based increasing number", "betas = 
self.getInitialGuessExpanding(betasListWhole, i, numModes) Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1]) TapproxVector = Vector(Tapprox, \"Tapprox\")", "Tdefvec = defvec.getArray() #print \"shape(Tdefvec): \", np.shape(Tdefvec) #print \"shape(M): \", np.shape(M) if len(M)", "to n-1 listofPreviousBetas: the list of previously calculated betas maxModes: the number of", "TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]): # store betas and RMSD", "calculations continue # elif RMSDReductions and (RMSDReductions[-1] == 1): # # we already", "else it is the first RMSD reduction run, no need to compare against", "preconceived=False): \"\"\" Obtain betas by a scipy optimizer fitting, the formula is given", "MTM = np.dot(Mtrans, Marray) stepPointsReduction = stepPointsReduction - 1 # reduce every value", "if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]): # store betas and RMSD reduction results betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox) #", "# calculate betas try: betas = self.obtainLstSqBetasGeneralizedExpanding(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes) except", "modesToConsider, status return betas @timeout() def obtainLstSqBetasGeneralizedExpanding(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, maxModes,", "# Molecular Sciences 11, no. 10 (September 28, 2010): 3623-3648. # doi:10.3390/ijms11103623. 
#", "= stepPointsReduction - 1 # reduce every value by one to have the", "using previous betas\\n \") betas = self.getInitialGuessExpanding(betasListWhole, i, numModes) Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1])", "dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsReverse(self, anm_slc, ref_chain, mob_chain, defvec, referenceName, filePrefix):", "deformed towards defvec: the deformation vector from proteinFrom to proteinTo referenceName: the name", "Marray.T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to a copy of proteinFrom", "\"SwarmDock and the Use of Normal # Modes in Protein-Protein Docking.\" International Journal", "number of modes, that are combined in a linear combination with betas. RMSD", "currentOverlap = calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or np.isinf(currentOverlap): print \"overlap has a numerical", "object proteinFrom: The overall matched chains of the protein to deform towards proteinTo", "calculate betas try: betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes, preconceived=True) except", "previously calculated betas anmTuple: anm tuple as generated by Prody preconceived: has guard", "copy of proteinFrom and get the RMSD towards proteinTo proteinFrom_copy = proteinFrom.copy() proteinFrom_copy.setCoords(proteinFrom_copy.getCoords()", "stepPointsReduction: list of number of modes to successively calculate the RMSD reductions on", "k) arrCopy = arr.copy() if len(k) == 1: Mbefore = np.array(np.dstack(arrCopy)[0][0]) M =", "10 (September 28, 2010): 3623-3648. doi:10.3390/ijms11103623. 
Args: anm: the ANM with modes defvec:", "many modes the betas should be calculated listofPreviousBetas: the list of previously calculated", "be calculated on this structure nonTrivialModes = maxModes #(maxModes[1].select('calpha').numAtoms()*3) - 6 initialGuess =", "= [] overlap = [] Mtrans = M.T MTM = np.dot(Mtrans, M) betasListWhole", "proteinTo) def calcRMSDReductionFromTo(self, Marray, proteinFrom, proteinTo, defvec, previousBetas, previousOverlap, previousRMSD, referenceName, filePrefix): \"\"\"", "defvec, MTM, i, betasListWhole, anm_slc) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox, \"Tapprox\")", "print \"anm_slc[0].getArray(): \", anm_slc[0][0:2].getArray().shape RMSDReductions = [] overlap = [] MTM = self.setupMTMforBetas(anm_slc[0])", "# if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) == -0.0: # print \"modesToConsider, nonTrivialModes,", "not lower RMSD\" betasListWhole.append(betas) # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap)", "RMSD was actually lower, the beta calculation was not successful betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1])", "the beta coefficents \"\"\" M = anm.getArray() #print \"first M original: \", M", "in excludeFirstK, and the following modes as given by the indices in indicesOfHighest\"\"\"", "print \"stepPointsReduction: \", stepPointsReduction guard = 0 for i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt:", "overlap currentOverlap = calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or np.isinf(currentOverlap): print \"overlap has a", "The RMSDReducer contains method to reduce the RMSD between proteins. 
''' def __init__(self,", "speedup other calculations continue # calculate betas try: betas = self.obtainLstSqBetasGeneralizedExpanding(Marray.T[0:i+1].T, defvec, MTM,", "modesToConsider, maxModes) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) ==", "which is being deformed towards defvec: the deformation vector from proteinFrom to proteinTo", "as myfile: myfile.write(referenceName+\" RMSD timeout at modes \" +str(Marray.shape[1])+\" using previous betas\\n \")", "modesToConsider, listofPreviousBetas, maxModes, preconceived=False): \"\"\" Obtain betas by a scipy optimizer fitting, the", "M.T # the default maxiter is too low, increase the number maximalIter =", "defvec, referenceName, filePrefix): \"\"\" Calculate a list of RMSD reductions based increasing number", "def setupMTMforBetas(self, anm): \"\"\" Calculate and return the dot product of all ANM", "i, betasListWhole, numModes) except TimeoutError: print \"RMSD timeout at modes\", i,\"using previous betas\"", "proteinTo: The overall matched chains of the protein which is being deformed towards", "defvec.getArray() if len(M) != len(Tdefvec): print \"len(M): \", M.shape print \"len(Tdefvec): \", len(Tdefvec)", "RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsReverseGeneral(self, Marray, ref_chain, mob_chain, defvec, referenceName, filePrefix): \"\"\" Calculate", "''' The RMSDReducer contains method to reduce the RMSD between proteins. ''' def", "= [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, numModes, initialStep=1) print \"stepPointsReduction: \", stepPointsReduction guard", "MTM = np.dot(Mtrans, M) return MTM def calcRMSDReductions(self, anm_slc, ref_chain, mob_chain, defvec): \"\"\"", "overlap 0\" currentOverlap = 0 return RMSD_after_Tapprox, currentOverlap, betas @timeout() def obtainLstSqBetas(self, anm,", "<NAME>., and <NAME>. 
\"SwarmDock and the Use of Normal Modes in Protein-Protein Docking.\"", "#(maxModes[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): if np.linalg.det(MTM)", "= previousOverlap[-1] if len(previousRMSD) == 0: previousRMSD = calcRMSD(proteinFrom, proteinTo) else: previousRMSD =", "up to how many modes the betas should be calculated, starting from 0", "of the bound structure and get the reduced RMSD mob_chain_copy = mob_chain.copy() mob_chain_copy.setCoords(mob_chain_copy.getCoords()", "Returns: RMSDReduction \"\"\" Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) try: betas =", "utils def setupMTMforBetas(self, anm): \"\"\" Calculate and return the dot product of all", "RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsReverseGeneral(self, Marray, ref_chain, mob_chain,", "ref_chain_copy = ref_chain.copy() ref_chain_copy.setCoords(ref_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain) if RMSDReductions: if", "[] numModes = Marray.shape[1] #MTM = self.setupMTMforBetas(anm_slc[0]) Mtrans = Marray.T MTM = np.dot(Mtrans,", "\"\"\" One shot calculation for the RMSD reduction. 
Args: Marray: Array of normal", "np.linalg.det(MTM) == -0.0: print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"det(MTM) == 0,", "the two structures are already too close or the mode vectors are problematic\"", "numpy as np from prody.dynamics.compare import calcOverlap from prody.dynamics.mode import Vector from prody.measure.transform", "of the protein to deform towards proteinTo proteinTo: The overall matched chains of", "was actually lower, the beta calculation was not successful betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else:", "this structure nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) if modesToConsider", "Constructor ''' self.utils = utils def setupMTMforBetas(self, anm): \"\"\" Calculate and return the", "= self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole, anm_slc, preconceived=True) except TimeoutError: print \"RMSD timeout", "initialGuess = self.expandInitialGuess(previousBetas, M.shape[1]) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2]", "modesToConsider, maxModesOverall): \"\"\" Create an initial guess vector, padded with 0.0 values to", "stepPointsReduction - 1 # reduce every value by one to have the index", "at modes \" +str(Marray.shape[1])+\" using previous betas\\n \") betas = self.getInitialGuess([0], Marray.shape[1]) Tapprox", "or np.isinf(currentOverlap): print \"overlap has a numerical problem\" if overlap: overlap.append(overlap[-1]) else: currentOverlap", "as getArray from an ANM object proteinFrom: The overall matched chains of the", "M.T MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM maximalIter = self.utils.config.maxIterBetas if modesToConsider", "preconceived # calculate betas try: betas = 
self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes,", "config been reached or not Returns: the beta coefficents \"\"\" M = anm", "nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"det(MTM) == 0, skipped\" # return initialGuess #", "= calcRMSD(mob_chain_copy, ref_chain) if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]: # store betas and", "problematic, returning original RMSD\" RMSD_after_Tapprox = calcRMSD(proteinFrom, proteinTo) # calc overlap currentOverlap =", "\"\"\" Tdefvec = defvec.getArray() if len(M) != len(Tdefvec): print \"len(M): \", M.shape print", "previousRMSD # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or np.isinf(currentOverlap): print", "numerical problem\" currentOverlap = 0 overlap.append(currentOverlap) if L_RMSReductions: if L_RMSD_after_Tapprox < L_RMSReductions[-1]: L_RMSReductions.append(L_RMSD_after_Tapprox)", "overall matched chain atoms from the bound structure defvec: the deformation vector stepPointsReduction:", "name of the reference, for output debugging purposes filePrefix: file prefix, for output", "\", modesToConsider, status # else: # # how many modes could be calculated", "\"\"\" Calculate and return the dot product of all ANM modes transposed times", "mob_chain) if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]: # store betas and RMSD reduction", "[0] else: previousBetas = previousBetas[-1] if len(previousOverlap) == 0: previousOverlap = 0 else:", "betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # print \"modesToConsider, status:", "the name of the reference Returns: RMSDReductions: The reduction list of obtained RMSD", "MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM # maximalIter = self.utils.config.maxIterBetas # #", "in range(len(initialGuess), modesToConsider+1)]) return 
initialGuess def expandInitialGuess(self, listofPreviousBetas, modesToConsider): \"\"\" Create an initial", "= Marray.T MTM = np.dot(Mtrans, Marray) stepPointsReduction = stepPointsReduction - 1 # reduce", "a numerical problem\" if overlap: overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap) guard =", "currentOverlap = 0 return RMSD_after_Tapprox, currentOverlap, betas def RMSDReductionFixedset(self, Marray, proteinFrom, proteinTo, defvec,", "the two structures are already too close or the mode vectors are problematic,", "M.T # MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM # maximalIter = self.utils.config.maxIterBetas", "deformationSnapshots[i] = mob_chain_copy.copy() if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]: # store betas and", "of previously calculated betas maxModes: the number of modes preconceived: has guard from", "modes are given to get the betas Returns: The initial guess vector for", "should go preconceived # calculate betas Mmode = self.getModeArrayKeepingFirstK(M, i) betas = self.obtainLstSqBetasByCollectivity(Mmode,", "fitter timeouts Returns: RMSDReduction, overlap, betas \"\"\" Mtrans = Marray.T MTM = np.dot(Mtrans,", "RMSD mob_chain_copy = mob_chain.copy() mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox,", "np.array(L_RMSReductions, dtype=np.float64) deformationSnapshots[\"proteinTo\"] = ref_chain.copy() return RMSDReductions, overlap, stepPointsReduction, L_RMSReductions, deformationSnapshots def getL_RMS(self,", "# use pre-calculated MTM maximalIter = self.utils.config.maxIterBetas if modesToConsider < 1: #print \"original", "mode vectors are problematic, returning original RMSD\" RMSD_after_Tapprox = calcRMSD(proteinFrom, proteinTo) # calc", "= self.getInitialGuess(betasListWhole, i) Tapprox = np.dot(betas[0:i+1], 
Marray.T[0:i+1]) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply", "else guard is >= self.utils.config.guard, and the RMSD reduction should go preconceived #", "calcRMSD(proteinFrom, proteinTo) def calcRMSDReductionFromTo(self, Marray, proteinFrom, proteinTo, defvec, previousBetas, previousOverlap, previousRMSD, referenceName, filePrefix):", "the RMSD fitter timeouts Returns: RMSDReduction, overlap, betas \"\"\" Mtrans = Marray.T MTM", "modesToConsider, status return betas @timeout() def obtainLstSqBetasGeneral(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, maxModes,", "the beta coefficents \"\"\" M = anm Tdefvec = defvec.getArray() #print \"shape(Tdefvec): \",", "np.shape(Mmode) betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i, betasListWhole, anm_slc) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T)", "\"first \",i,\" M: \", M for j in range(0, len(indicesOfHighest)): M = np.dstack((M,", "self.getInitialGuessExpanding(betasListWhole, i, numModes) Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1]) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply", "Tdefvec), maxiter=maximalIter)[0:2] else: if previousBetas is not None: initialGuess = self.expandInitialGuess(previousBetas, M.shape[1]) betas,", "betas. 
Args: anm_slc: The sliced ANM, with the corresponding entries of the eigenvectors", "\"RMSD timeout at modes\", i, \"using previous betas\" with open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile:", "calculation was not successful betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else: # else it is the", "The overall matched chains of the protein which is being deformed towards previousBetas:", "continue if guard < self.utils.config.guard: # calculate betas ## new Mmode instead of", "currentOverlap = 0 return RMSD_after_Tapprox, currentOverlap, betas @timeout() def obtainLstSqBetas(self, anm, defvec, MTMfull,", "of np.arrays with the modes specified by the indices in excludeFirstK, and the", "maxModes) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # betas, status", "Args: # anm: the ANM with modes # defvec: the deformationvector # MTMfull:", "the deformation vector MTM: dot product of the ANM matrix inverse times the", "Args: listofPreviousBetas: the list of previously calculated Betas modesToConsider: up to how many", "dtype=np.float64) L_RMSReductions = np.array(L_RMSReductions, dtype=np.float64) deformationSnapshots[\"proteinTo\"] = ref_chain.copy() return RMSDReductions, overlap, stepPointsReduction, L_RMSReductions,", "too close or the mode vectors are problematic\" if previousOverlap: currentOverlap = previousOverlap", "coefficents \"\"\" M = anm Tdefvec = defvec.getArray() #print \"shape(Tdefvec): \", np.shape(Tdefvec) #print", "numerical problem\" if overlap: overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap) else: # else", "TimeoutError from collections import OrderedDict class RMSDReducer(object): ''' The RMSDReducer contains method to", "are problematic, returning original RMSD\" RMSD_after_Tapprox = calcRMSD(proteinFrom, 
proteinTo) # calc overlap currentOverlap", "\", M return M[0] def getModeArrayKeepingFirstK(self, arr, k): k += 1 k =", "= np.dot(Mtrans, M) return MTM def calcRMSDReductions(self, anm_slc, ref_chain, mob_chain, defvec): \"\"\" Calculate", "maxiter=maximalIter)[0:2] print \"modesToConsider, status: \", modesToConsider, status elif not preconceived: initialGuess = self.getInitialGuessExpanding(listofPreviousBetas,", "print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"det(MTM) == 0, skipped\" return initialGuess", "Vector(Tapprox, \"Tapprox\") # apply Tapprox to a copy of proteinFrom and get the", "using previous betas\\n \") betas = self.getInitialGuess([0], Marray.shape[1]) Tapprox = np.dot(betas, Marray.T) TapproxVector", "[x*0.0 for x in range(len(initialGuess), modesToConsider+1)]) if len(initialGuess) > maxModesOverall: initialGuess = initialGuess[:maxModesOverall]", "M for i in range(0, len(Mbefore)): M[i] = Mbefore[i] return M elif len(arr[0])", "L_RMSReductions = [] overlap = [] numModes = Marray.shape[1] Mtrans = Marray.T MTM", "the deformation vector Returns: RMSDReductions: The reduction list of obtained RMSD values \"\"\"", "or not Returns: the beta coefficents \"\"\" M = anm Tdefvec = defvec.getArray()", "RMSD # store betas and RMSD reduction results betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox) # calc overlap", "0.0 values to the correct length. 
Args: listofPreviousBetas: the list of previously calculated", "L_RMSD_after_Tapprox < L_RMSReductions[-1]: L_RMSReductions.append(L_RMSD_after_Tapprox) else: print \"previous L_RMS lower at \", i #", "on individual proteins, L_RMS does not apply, # return RMSD of individual proteins", "btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2] print \"modesToConsider, status: \", modesToConsider, status else: # how many", "Mbefore[i] return M elif len(arr[0]) == len(k): return arr else: M = np.dstack(arrCopy)[0][0]", "excludeFirstK) M = anm_slc[excludeFirstK[0]].getArray() #print \"initial M: \", M for i in range(1,", "arr, k): k += 1 k = range(0, k) arrCopy = arr.copy() if", "combination with betas. RMSD change from mob_chain to ref_chain Args: anm_slc: The sliced", "raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") Mtrans = M.T MTM = MTMfull[:modesToConsider+1,:modesToConsider+1]", "currentOverlap = previousOverlap else: currentOverlap = 0 return RMSD_after_Tapprox, currentOverlap, betas def RMSDReductionFixedset(self,", "np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) == -0.0: print \"modesToConsider, nonTrivialModes, status: \", modesToConsider,", "with 0.0 to reach the correct length \"\"\" initialGuess = listofPreviousBetas initialGuess =", "timeout at modes\", i, \"using previous betas\" with open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\"", "and (RMSDReductions[-1] == 1): # # we already reached a RMSD Rreduction of", "calculate betas try: betas = self.obtainLstSqBetasGeneralizedExpanding(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes) except TimeoutError:", "if overlap: overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap) guard = 0 else: print", "calculate betas Mmode = self.getModeArrayKeepingFirstK(M, i) betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i, 
betasListWhole,", "= (anmTuple[1].select('calpha').numAtoms()*3) - 6 # initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) # if modesToConsider >", "lower, the beta calculation was not successful L_RMSReductions.append(L_RMSReductions[-1]) else: # else it is", "successful betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else: # else it is the first RMSD reduction", "np.linalg.det(MTM) == -0.0: # print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"det(MTM) ==", "initialGuess def calcRMSDReductionsAidedByCollectivity(self, collectivity, highestN, excludeFirstK, anm_slc, ref_chain, mob_chain): indicesOfHighest = self.utils.getIndiciesofHighestN(np.abs(collectivity), highestN,", "# else it is an investigation on individual proteins, L_RMS does not apply,", "\", modesToConsider, nonTrivialModes, \"det(MTM) == 0, skipped\" return initialGuess betas, status = cg(MTM,", "bound structure defvec: the deformation vector referenceName: the name of the reference Returns:", "return M[0] def getModeArrayKeepingFirstK(self, arr, k): k += 1 k = range(0, k)", "not successful L_RMSReductions.append(L_RMSReductions[-1]) else: # else it is the first L_RMSD reduction run,", "the RMSD between proteins. 
''' def __init__(self, utils): ''' Constructor ''' self.utils =", "listofPreviousBetas, modesToConsider, maxModesOverall): \"\"\" Create an initial guess vector, padded with 0.0 values", "x in range(len(initialGuess), modesToConsider+1)]) if len(initialGuess) > maxModesOverall: initialGuess = initialGuess[:maxModesOverall] return initialGuess", "= [] numModes = Marray.shape[1] Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) stepPointsReduction", "= np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsReverse(self, anm_slc, ref_chain, mob_chain, defvec,", "- 6 initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): if np.linalg.det(MTM) ==", "stepPointsReduction[0]] deformationSnapshots = OrderedDict() deformationSnapshots[\"proteinFrom\"] = mob_chain.copy() for i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt:", "== 0: previousBetas = [0] else: previousBetas = previousBetas[-1] if len(previousOverlap) == 0:", "\", i # else the previous RMSD was actually lower, the beta calculation", "RMSDReductions = np.array(RMSDReductions, dtype=np.float64) L_RMSReductions = np.array(L_RMSReductions, dtype=np.float64) deformationSnapshots[\"proteinTo\"] = ref_chain.copy() return RMSDReductions,", "the Use of Normal Modes in Protein-Protein Docking.\" International Journal of Molecular Sciences", "RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def getModeArrayBasedOnIndices(self, anm_slc, excludeFirstK, indicesOfHighest):", "currentOverlap, betas @timeout() def obtainLstSqBetas(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, anmTuple, preconceived=False): \"\"\"", "betas\\n \") betas = self.getInitialGuess([0], Marray.shape[1]) Tapprox = np.dot(betas, Marray.T) TapproxVector = Vector(Tapprox,", "# calc overlap currentOverlap = 
calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or np.isinf(currentOverlap): print \"overlap", "betas # def obtainLstSqBetasByCollectivity(self, M, defvec, MTMfull, modesToConsider, listofPreviousBetas, anmTuple, preconceived=False): # \"\"\"", "to reach the correct length \"\"\" initialGuess = listofPreviousBetas initialGuess = np.append(initialGuess, [x*0.0", "RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]: # store betas", "The previous reduced RMSD defvec: the deformation vector from proteinFrom to proteinTo referenceName:", "calculated betas anmTuple: anm tuple as generated by Prody preconceived: has guard from", "The previous betas, serves as part of the initial guess for the fitter", "\"SwarmDock and the Use of Normal Modes in Protein-Protein Docking.\" International Journal of", "betas try: betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes) except TimeoutError: print", "\"modesToConsider, status: \", modesToConsider, status else: # how many modes could be calculated", "how many modes the betas should be calculated listofPreviousBetas: the list of previously", "pre-calculated MTM # maximalIter = self.utils.config.maxIterBetas # # if modesToConsider < 1: #", "\", M for i in range(1, len(excludeFirstK)): M = np.dstack((M, anm_slc[excludeFirstK[i]].getArray())) # print", "the RMSD fitter timeouts filePrefix: filePrefix, for output debugging if the RMSD fitter", "close or the mode vectors are problematic\" if previousOverlap: currentOverlap = previousOverlap else:", "anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, maxModes, preconceived=False): \"\"\" Obtain betas by a scipy", "range(len(initialGuess), modesToConsider)]) return initialGuess def calcRMSDReductionsAidedByCollectivity(self, collectivity, highestN, excludeFirstK, anm_slc, ref_chain, mob_chain): indicesOfHighest", "== 1): # # we already reached a RMSD 
Rreduction of 1.0 #", "import calcDeformVector import numpy as np from prody.dynamics.compare import calcOverlap from prody.dynamics.mode import", "of individual proteins instead return calcRMSD(proteinFrom, proteinTo) def calcRMSDReductionFromTo(self, Marray, proteinFrom, proteinTo, defvec,", "M = np.dstack((M, anm_slc[indicesOfHighest[j]].getArray())) # print \"highe \",j,\" M: \", M return M[0]", "RMSD comparison if previousRMSD: if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox) or previousRMSD < RMSD_after_Tapprox: print", "# calculate betas try: betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole, anm_slc) except", "calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or np.isinf(currentOverlap): print \"overlap has a numerical problem\" currentOverlap", "chain atoms from the bound structure defvec: the deformation vector referenceName: the name", "RMSD defvec: the deformation vector from proteinFrom to proteinTo referenceName: the name of", "of modes to successively calculate the RMSD reductions on referenceName: the name of", "dot product of all ANM modes transposed times all ANM modes.\"\"\" M =", "Mmode instead of anm_slc and then [][] Mmode = self.getModeArrayKeepingFirstK(M, i) print \"Mmode:", "go preconceived # calculate betas try: betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole,", "np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsExpandingSet(self, Marray, ref_chain, mob_chain, defvec, stepPointsReduction,", "normal modes, same shape as getArray from an ANM object proteinFrom: The overall", "Tapprox = np.dot(betas, Marray.T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to a", "except TimeoutError: print \"RMSD timeout at modes\", i,\"using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\")", "# betas, status = 
lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2] print \"modesToConsider,", "return initialGuess print \"modesToConsider, status: \", modesToConsider, status return betas @timeout() def obtainLstSqBetasGeneral(self,", "Tdefvec = defvec.getArray() if len(M) != len(Tdefvec): print \"len(M): \", M.shape print \"len(Tdefvec):", "calcDeformVector import numpy as np from prody.dynamics.compare import calcOverlap from prody.dynamics.mode import Vector", "anm: the ANM with modes # defvec: the deformationvector # MTMfull: dot product", "status: \", modesToConsider, nonTrivialModes, \"det(MTM) == 0, skipped\" # return initialGuess # betas,", "problem, maybe the two structures are already too close or the mode vectors", "Marray, ref_chain, mob_chain, defvec, stepPointsReduction, referenceName, filePrefix): \"\"\" Calculate a list of RMSD", "calculate betas, len(M) != len(Tdefvec)\") Mtrans = M.T # the default maxiter is", "inverse times # the ANM matrix # modesToConsider: up to how many modes", "modes.\"\"\" M = anm.getArray() Mtrans = M.T MTM = np.dot(Mtrans, M) return MTM", "two structures are already too close or the mode vectors are problematic, returning", "if the RMSD fitter timeouts Returns: RMSDReduction, overlap, betas \"\"\" Mtrans = Marray.T", "following modes as given by the indices in indicesOfHighest\"\"\" excludeFirstK = range(0, excludeFirstK)", "overall matched chains of the protein to deform towards proteinTo proteinTo: The overall", "np.shape(M) if len(M) != len(Tdefvec): print \"len(M): \", M.shape print \"len(Tdefvec): \", len(Tdefvec)", "previousBetas is not None: initialGuess = self.expandInitialGuess(previousBetas, M.shape[1]) betas, status = cg(MTM, np.dot(Mtrans,", "modesToConsider, nonTrivialModes, \"status == \", status, \" skipped\" return initialGuess print \"modesToConsider, status:", "in range(1, len(k)): M = np.dstack((M, 
np.dstack(arrCopy)[0][i])) #print \"M in keep first \"+str(i)+\":", "original: \", M Tdefvec = defvec.getArray() #print \"shape(Tdefvec): \", np.shape(Tdefvec) #print \"shape(M): \",", "\", modesToConsider, status return betas @timeout() def obtainLstSqBetasGeneral(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas,", "MTM: dot product of the ANM matrix inverse times the ANM matrix previousBetas:", "Returns: RMSDReductions: The reduction list of obtained RMSD values \"\"\" print \"anm_slc[0].getArray(): \",", "range 0 to n-1 print stepPointsReduction betasListWhole = [[0] * stepPointsReduction[0]] deformationSnapshots =", "instead return calcRMSD(proteinFrom, proteinTo) def calcRMSDReductionFromTo(self, Marray, proteinFrom, proteinTo, defvec, previousBetas, previousOverlap, previousRMSD,", "status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] # elif not preconceived: # initialGuess =", "the full ANM matrix inverse times the ANM matrix modesToConsider: up to how", "!= len(Tdefvec)\") Mtrans = M.T # the default maxiter is too low, increase", "maxModes #(maxModes[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined):", "towards proteinTo proteinTo: The overall matched chains of the protein which is being", "if RMSD_after_Tapprox < initial_RMSD: RMSDReductions.append(RMSD_after_Tapprox) else: RMSDReductions.append(initial_RMSD) print \"first mode did not lower", "status elif not preconceived: initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) betas, status = cg(MTM, np.dot(Mtrans,", "already reached a RMSD Rreduction of 1.0 # betasListWhole.append(betasListWhole[-1]) # RMSDReductions.append(RMSDReductions[-1]) # overlap.append(overlap[-1])", "status: \", modesToConsider, status # return betas def getInitialGuessExpanding(self, listofPreviousBetas, modesToConsider, 
maxModesOverall): \"\"\"", "modesToConsider: up to how many modes are given to get the betas Returns:", "import TimeoutError from collections import OrderedDict class RMSDReducer(object): ''' The RMSDReducer contains method", "if len(initialGuess) > maxModesOverall: initialGuess = initialGuess[:maxModesOverall] return initialGuess def getInitialGuess(self, listofPreviousBetas, modesToConsider):", "vector stepPointsReduction: list of number of modes to successively calculate the RMSD reductions", "reduce every value by one to have the index match the range 0", "defvec: the deformation vector from proteinFrom to proteinTo referenceName: the name of the", "should be calculated listofPreviousBetas: the list of previously calculated betas anmTuple: anm tuple", "L_RMS reduction results initial_L_RMS = self.getL_RMS(mob_chain, ref_chain, self.utils.config.investigationsOn) if L_RMSD_after_Tapprox < initial_L_RMS: L_RMSReductions.append(L_RMSD_after_Tapprox)", "= anm Tdefvec = defvec.getArray() #print \"shape(Tdefvec): \", np.shape(Tdefvec) #print \"shape(M): \", np.shape(M)", "i > self.utils.config.stopRMSDReductionAt or i > numModes: # temporary, to speedup other calculations", "guard < self.utils.config.guard: # calculate betas ## new Mmode instead of anm_slc and", "coefficents \"\"\" Tdefvec = defvec.getArray() if len(M) != len(Tdefvec): print \"len(M): \", M.shape", "deformationSnapshots = OrderedDict() deformationSnapshots[\"proteinFrom\"] = mob_chain.copy() for i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if", "MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM # maximalIter = self.utils.config.maxIterBetas # # if modesToConsider", "deformation vector) investigationsON: \"Complex\" or \"Individual\" Returns: L_RMS of proteinFrom and proteinTo \"\"\"", "of all ANM modes transposed times all ANM modes.\"\"\" M = anm.getArray() Mtrans", "= MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM maximalIter = 
self.utils.config.maxIterBetas if modesToConsider < 1:", "bound structure defvec: the deformation vector Returns: RMSDReductions: The reduction list of obtained", "# calculate betas try: betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes) except", "\", np.shape(Mmode) betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i, betasListWhole, anm_slc) Tapprox = np.dot(betas[0:i+1],", "open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at modes \" +str(i)+\" using previous", "RMSDReductionFixedset(self, Marray, proteinFrom, proteinTo, defvec, referenceName, filePrefix): \"\"\" One shot calculation for the", "ANM matrix # modesToConsider: up to how many modes the betas should be", "modesToConsider, nonTrivialModes, \"det(MTM) == 0, skipped\" return initialGuess betas, status = cg(MTM, np.dot(Mtrans,", "overlap: overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap) guard = 0 else: print \"previous", "< initial_RMSD: RMSDReductions.append(RMSD_after_Tapprox) else: RMSDReductions.append(initial_RMSD) print \"first mode did not lower RMSD\" betasListWhole.append(betas)", "# cast objects overlap = np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return RMSDReductions,", "M.shape[1] == 1: betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] else: if previousBetas", "modes\", i, \"using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout", "anm_slc, preconceived=True) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox", "dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsExpandingSet(self, Marray, ref_chain, 
mob_chain, defvec, stepPointsReduction, referenceName,", "\"shape(Tdefvec): \", np.shape(Tdefvec) #print \"shape(M): \", np.shape(M) if len(M) != len(Tdefvec): print \"len(M):", "lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2] print \"modesToConsider, status: \", modesToConsider, status", "reduction list of obtained RMSD values \"\"\" print \"anm_slc[0].getArray(): \", anm_slc[0][0:2].getArray().shape RMSDReductions =", "calcRMSD(mob_chain_copy, ref_chain) L_RMSD_after_Tapprox = self.getL_RMS(mob_chain_copy, ref_chain, self.utils.config.investigationsOn) deformationSnapshots[i] = mob_chain_copy.copy() if RMSDReductions: if", "to deform towards proteinTo proteinTo: The overall matched chains of the protein which", "need to compare against previous RMSD # store betas and RMSD reduction results", "RMSD of individual proteins instead return calcRMSD(proteinFrom, proteinTo) def calcRMSDReductionFromTo(self, Marray, proteinFrom, proteinTo,", "preconceived: # initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) # betas, status = cg(MTM, np.dot(Mtrans, Tdefvec),", "return initialGuess betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # betas,", "Returns: the beta coefficents \"\"\" M = anm Tdefvec = defvec.getArray() if len(M)", "product of all ANM modes transposed times all ANM modes.\"\"\" M = anm.getArray()", "linear combination with betas. 
Args: anm_slc: The sliced ANM, with the corresponding entries", "1 # reduce every value by one to have the index match the", "coefficents # \"\"\" # ### old # ### M = anm.getArray() # #", "tol=self.utils.config.precisionBetaFitting)[0:2] print \"modesToConsider, status: \", M.shape[1], status return betas # def obtainLstSqBetasByCollectivity(self, M,", "if status != 0: # print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"status", "listofPreviousBetas, modesToConsider): \"\"\" Create an initial guess vector, padded with 0.0 values to", "len(excludeFirstK)): M = np.dstack((M, anm_slc[excludeFirstK[i]].getArray())) # print \"first \",i,\" M: \", M for", "len(k): return arr else: M = np.dstack(arrCopy)[0][0] #print \"first M in keep first", "M = np.dstack((M, anm_slc[excludeFirstK[i]].getArray())) # print \"first \",i,\" M: \", M for j", "excludeFirstK, anm_slc, ref_chain, mob_chain): indicesOfHighest = self.utils.getIndiciesofHighestN(np.abs(collectivity), highestN, excludeFirstK) M = self.getModeArrayBasedOnIndices(anm_slc[0], excludeFirstK,", "cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] print \"modesToConsider, status: \", M.shape[1], status return betas", "self.setupMTMforBetas(anm_slc[0]) Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) betasListWhole = [] stepPointsReduction =", "\"modesToConsider, status: \", modesToConsider, status return betas @timeout() def obtainLstSqBetasGeneralizedExpanding(self, anm, defvec, MTMfull,", "# else the previous RMSD was actually lower, the beta calculation was not", "RMSDReductions.append(RMSDReductions[-1]) # overlap.append(overlap[-1]) # print \"already reached RMSD = 1 at i:\", i", "if np.isnan(currentOverlap) or np.isinf(currentOverlap): print \"overlap has a numerical problem\" currentOverlap = 0", "combined in a linear combination with betas. 
Args: anm_slc: The sliced ANM, with", "and get the reduced RMSD ref_chain_copy = ref_chain.copy() ref_chain_copy.setCoords(ref_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox =", "= self.utils.config.maxIterBetas # # if modesToConsider < 1: # print \"using one column\"", "# print \"using one column\" # betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2]", "overall matched chain atoms from the unbound structure mob_chain: The overall matched chain", "is too low, increase the number maximalIter = self.utils.config.maxIterBetas if M.shape[1] == 1:", "M) return MTM def calcRMSDReductions(self, anm_slc, ref_chain, mob_chain, defvec): \"\"\" Calculate a list", "status, \" skipped\" # return initialGuess # print \"modesToConsider, status: \", modesToConsider, status", "betas = self.getInitialGuess(previousBetas, Marray.shape[1]) Tapprox = np.dot(betas, Marray.T) TapproxVector = Vector(Tapprox, \"Tapprox\") #", "== 0.0 or np.linalg.det(MTM) == -0.0: print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes,", "timeouts filePrefix: filePrefix, for output debugging if the RMSD fitter timeouts Returns: RMSDReduction", "x in range(len(initialGuess), modesToConsider+1)]) return initialGuess def expandInitialGuess(self, listofPreviousBetas, modesToConsider): \"\"\" Create an", "Marray.T MTM = np.dot(Mtrans, Marray) betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, numModes,", "\"RMSD timeout at modes\", i,\"using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\"", "= calcRMSD(proteinFrom, proteinTo) else: previousRMSD = previousRMSD[-1] try: betas = self.obtainLstSqBetasGeneralized2(Marray, defvec, MTM)", "betas try: betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole, anm_slc) except TimeoutError: print", "inverse times the ANM matrix 
previousBetas: previously calculated betas Returns: the beta coefficents", "or np.isinf(currentOverlap): print \"overlap has a numerical problem\" currentOverlap = 0 overlap.append(currentOverlap) if", "calcRMSD(proteinFromL, proteinToL) else: # else it is an investigation on individual proteins, L_RMS", "in : Moal, <NAME>., and <NAME>. \"SwarmDock and the Use of Normal Modes", "correct length \"\"\" initialGuess = listofPreviousBetas initialGuess = np.append(initialGuess, [x*0.0 for x in", "k = range(0, k) arrCopy = arr.copy() if len(k) == 1: Mbefore =", "numerical problem\" currentOverlap = 0 overlap.append(currentOverlap) else: # else guard is >= self.utils.config.guard,", "Array of normal modes, same shape as getArray from an ANM object proteinFrom:", "shot calculation for the RMSD reduction. Args: Marray: Array of normal modes, same", "\"Complex\" or \"Individual\" Returns: L_RMS of proteinFrom and proteinTo \"\"\" if investigationsOn ==", "else: currentOverlap = 0 overlap.append(currentOverlap) else: # else the previous RMSD was actually", "L_RMSReductions.append(initial_L_RMS) print \"first mode did not lower L_RMS\" # cast objects overlap =", "!= len(Tdefvec)\") Mtrans = M.T MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM maximalIter", "\"\"\" RMSDReductions = [] L_RMSReductions = [] overlap = [] numModes = Marray.shape[1]", "M for i in range(1, len(excludeFirstK)): M = np.dstack((M, anm_slc[excludeFirstK[i]].getArray())) # print \"first", "times all ANM modes.\"\"\" M = anm.getArray() Mtrans = M.T MTM = np.dot(Mtrans,", "the beta calculation was not successful guard += 1 betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else:", "# if status != 0: # print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes,", "= [0] else: previousBetas = previousBetas[-1] if len(previousOverlap) == 0: previousOverlap = 0", "i > numModes: # temporary, to 
speedup other calculations continue # calculate betas", "> self.utils.config.stopRMSDReductionAt: # temporary, to speedup other calculations continue if guard < self.utils.config.guard:", "M.shape[1]) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] else: betas, status", "overlap = [] MTM = self.setupMTMforBetas(anm_slc[0]) betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10,", "reduce the RMSD between proteins. ''' def __init__(self, utils): ''' Constructor ''' self.utils", "betas maxModes: the number of modes preconceived: has guard from config been reached", "np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsReverseGeneral(self, Marray, ref_chain, mob_chain, defvec, referenceName,", "investigation on individual proteins, L_RMS does not apply, # return RMSD of individual", "Marray[0:2] RMSDReductions = [] overlap = [] numModes = Marray.shape[1] #MTM = self.setupMTMforBetas(anm_slc[0])", "\", modesToConsider, status elif not preconceived: initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes) betas, status", "or the mode vectors are problematic\" if previousOverlap: currentOverlap = previousOverlap else: currentOverlap", "range(len(initialGuess), modesToConsider+1)]) return initialGuess def expandInitialGuess(self, listofPreviousBetas, modesToConsider): \"\"\" Create an initial guess", "betas should be calculated # # Returns: # the beta coefficents # \"\"\"", "deformationSnapshots[\"proteinFrom\"] = mob_chain.copy() for i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if i > self.utils.config.stopRMSDReductionAt", "the deformation vector referenceName: the name of the reference Returns: RMSDReductions: The reduction", "self.getInitialGuess(listofPreviousBetas, modesToConsider) # betas, status = cg(MTM, np.dot(Mtrans, 
Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] #", "M = anm.getArray() Mtrans = M.T MTM = np.dot(Mtrans, M) return MTM def", "mob_chain.copy() mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]): # store", "structures are already too close or the mode vectors are problematic\" if previousOverlap:", "tuple as generated by Prody preconceived: has guard from config been reached or", "betas\\n \") betas = self.getInitialGuess(betasListWhole, i) Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1]) TapproxVector = Vector(Tapprox,", "expandInitialGuess(self, listofPreviousBetas, modesToConsider): \"\"\" Create an initial guess vector, padded with 0.0 values", "defvec: the deformation vector Returns: RMSDReductions: The reduction list of obtained RMSD values", "chain atoms from the unbound structure mob_chain: The overall matched chain atoms from", "1 at i:\", i # raw_input() # continue if guard < self.utils.config.guard: #", "print stepPointsReduction betasListWhole = [[0] * stepPointsReduction[0]] deformationSnapshots = OrderedDict() deformationSnapshots[\"proteinFrom\"] = mob_chain.copy()", "i, numModes) Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1]) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox", "method to reduce the RMSD between proteins. ''' def __init__(self, utils): ''' Constructor", "status else: # how many modes could be calculated on this structure nonTrivialModes", "Docking.\" International Journal of Molecular Sciences 11, no. 
10 (September 28, 2010): 3623-3648.", "0: previousOverlap = 0 else: previousOverlap = previousOverlap[-1] if len(previousRMSD) == 0: previousRMSD", "np.dot(Mtrans, Tdefvec), maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] print \"modesToConsider, status: \", M.shape[1], status return betas #", "Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) try: betas = self.obtainLstSqBetasGeneralized2(Marray, defvec, MTM)", "individual proteins instead return calcRMSD(proteinFrom, proteinTo) def calcRMSDReductionFromTo(self, Marray, proteinFrom, proteinTo, defvec, previousBetas,", "modesToConsider, listofPreviousBetas, anmTuple, preconceived=False): # \"\"\" Obtain betas by a scipy optimizer fitting,", "# nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) - 6 # initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) # if", "modes as given by the indices in indicesOfHighest\"\"\" excludeFirstK = range(0, excludeFirstK) M", "it is the first RMSD reduction run, no need to compare against previous", "modesToConsider+1)]) if len(initialGuess) > maxModesOverall: initialGuess = initialGuess[:maxModesOverall] return initialGuess def getInitialGuess(self, listofPreviousBetas,", "TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(proteinFrom_copy, proteinTo) # RMSD comparison if previousRMSD: if np.isnan(RMSD_after_Tapprox) or", "# \"\"\" Obtain betas by a scipy optimizer fitting, the formula is given", "defvec: the deformationvector MTMfull: dot product of the full ANM matrix inverse times", "matrix previousBetas: previously calculated betas Returns: the beta coefficents \"\"\" Tdefvec = defvec.getArray()", "modesToConsider: up to how many modes the betas should be calculated # #", "matched). 
Args: proteinFrom: Deformed protein proteinTo: Target protein (target of the deformation vector)", "store betas and RMSD reduction results initial_RMSD = calcRMSD(mob_chain, ref_chain) if RMSD_after_Tapprox <", "calcRMSDReductions(self, anm_slc, ref_chain, mob_chain, defvec): \"\"\" Calculate a list of RMSD reductions based", "if guard < self.utils.config.guard: # calculate betas ## new Mmode instead of anm_slc", "calculated Betas modesToConsider: up to how many modes are given to get the", "# #print \"shape(M): \", np.shape(M) # if len(M) != len(Tdefvec): # raise ValueError(\"Cannot", "modes # defvec: the deformationvector # MTMfull: dot product of the full ANM", "has a numerical problem, maybe the two structures are already too close or", "\"\"\" initialGuess = listofPreviousBetas initialGuess = np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider)])", "betas def getInitialGuessExpanding(self, listofPreviousBetas, modesToConsider, maxModesOverall): \"\"\" Create an initial guess vector, padded", "reference Returns: RMSDReductions: The reduction list of obtained RMSD values \"\"\" print \"anm_slc[0].getArray():", "# myfile.write(referenceName+\" RMSD timeout at modes \" +str(i)+\" using previous betas\\n \") betas", "np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox) or previousRMSD < RMSD_after_Tapprox: print \"RMSD_after_Tapprox has a numerical problem,", "else: currentOverlap = 0 overlap.append(currentOverlap) else: print \"previous RMSD lower at \", i", "print \"overlap has a numerical problem, maybe the two structures are already too", "calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or np.isinf(currentOverlap): print \"overlap has a numerical problem, maybe", "beta coefficents \"\"\" M = anm.getArray() #print \"first M original: \", M Tdefvec", "returning original RMSD\" RMSD_after_Tapprox = calcRMSD(proteinFrom, proteinTo) # calc overlap currentOverlap = calcOverlap(TapproxVector,", 
"(nonTrivialModes+self.utils.config.goOverdetermined): # if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) == -0.0: # print \"modesToConsider,", "elif RMSDReductions and (RMSDReductions[-1] == 1): # # we already reached a RMSD", "\"\"\" if investigationsOn == \"Complex\": proteinFromL = proteinFrom.select('segment \\\"L.\\\"') proteinToL = proteinTo.select('segment \\\"L.\\\"')", "if M.shape[1] == 1: betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] else: if", "MTM, i, betasListWhole, anm_slc) except TimeoutError: print \"RMSD timeout at modes\", i,\"using previous", "RMSD ref_chain_copy = ref_chain.copy() ref_chain_copy.setCoords(ref_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox,", "previousRMSD: The previous reduced RMSD defvec: the deformation vector from proteinFrom to proteinTo", "or not Returns: the beta coefficents \"\"\" M = anm.getArray() #print \"first M", "betasListWhole, numModes, preconceived=True) except TimeoutError: print \"RMSD timeout at modes\", i, \"using previous", "> self.utils.config.stopRMSDReductionAt or i > numModes: # temporary, to speedup other calculations continue", "how many modes could be calculated on this structure nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) -", "2010): 3623-3648. doi:10.3390/ijms11103623. 
Args: M: the modes array defvec: the deformation vector MTM:", "anmTuple: anm tuple as generated by Prody preconceived: has guard from config been", "n-1 print stepPointsReduction betasListWhole = [[0] * stepPointsReduction[0]] deformationSnapshots = OrderedDict() deformationSnapshots[\"proteinFrom\"] =", "ref_chain, self.utils.config.investigationsOn) deformationSnapshots[i] = mob_chain_copy.copy() if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]: # store", "len(previousBetas) == 0: previousBetas = [0] else: previousBetas = previousBetas[-1] if len(previousOverlap) ==", "10, numModes, initialStep=1) print \"stepPointsReduction: \", stepPointsReduction guard = 0 for i in", "if np.isnan(currentOverlap) or np.isinf(currentOverlap): print \"overlap has a numerical problem, maybe the two", "One shot calculation for the RMSD reduction. Args: Marray: Array of normal modes,", "successively calculate the RMSD reductions on referenceName: the name of the reference, for", "has a numerical problem\" if overlap: overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap) guard", "Marray.shape[1] Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) stepPointsReduction = stepPointsReduction - 1", "ANM matrix previousBetas: previously calculated betas Returns: the beta coefficents \"\"\" Tdefvec =", "to a copy of the bound structure and get the reduced RMSD mob_chain_copy", "chains of the protein to deform towards proteinTo proteinTo: The overall matched chains", ": # # Moal, <NAME>., and <NAME>. 
\"SwarmDock and the Use of Normal", "problem\" if overlap: overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap) else: # else the", "the mode vectors are problematic, returning overlap 0\" currentOverlap = 0 return RMSD_after_Tapprox,", "for i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if i > self.utils.config.stopRMSDReductionAt: # temporary, to", "np.dstack((M, anm_slc[excludeFirstK[i]].getArray())) # print \"first \",i,\" M: \", M for j in range(0,", "\" +str(i)+\" using previous betas\\n \") betas = self.getInitialGuess(betasListWhole, i) Tapprox = np.dot(betas[0:i+1],", "array defvec: the deformation vector MTM: dot product of the ANM matrix inverse", "matrix modesToConsider: up to how many modes the betas should be calculated listofPreviousBetas:", "= cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # betas, status = lsmr(MTM, np.dot(Mtrans,", "is the first RMSD reduction run, store betas and RMSD reduction results initial_RMSD", "betasListWhole.append(betas) # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or np.isinf(currentOverlap): print", "= self.getInitialGuessExpanding(betasListWhole, i, numModes) Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1]) TapproxVector = Vector(Tapprox, \"Tapprox\") #", "many modes the betas should be calculated, starting from 0 to n-1 listofPreviousBetas:", "status # return betas def getInitialGuessExpanding(self, listofPreviousBetas, modesToConsider, maxModesOverall): \"\"\" Create an initial", "copy of the unbound structure and get the reduced RMSD ref_chain_copy = ref_chain.copy()", "previousBetas: previously calculated betas Returns: the beta coefficents \"\"\" Tdefvec = defvec.getArray() if", "= previousRMSD[-1] try: betas = self.obtainLstSqBetasGeneralized2(Marray, defvec, MTM) except TimeoutError: print \"RMSD timeout", "with betas. 
Args: anm_slc: The sliced ANM, with the corresponding entries of the", "speedup other calculations continue # elif RMSDReductions and (RMSDReductions[-1] == 1): # #", "\", np.shape(M) if len(M) != len(Tdefvec): print \"len(M): \", M.shape print \"len(Tdefvec): \",", "towards previousBetas: The previous betas, serves as part of the initial guess for", "in keep first k: \", M for i in range(1, len(k)): M =", "betas, len(M) != len(Tdefvec)\") Mtrans = M.T # the default maxiter is too", "< 1: #print \"original MTM, np.dot(Mtrans, Tdefvec) \", MTM, np.dot(Mtrans, Tdefvec) betas, status", "len(M) != len(Tdefvec): raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") Mtrans = M.T", "dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def getModeArrayBasedOnIndices(self, anm_slc, excludeFirstK, indicesOfHighest): \"\"\" Create an", "or np.linalg.det(MTM) == -0.0: print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"det(MTM) ==", "self.utils.config.stopRMSDReductionAt: # temporary, to speedup other calculations continue # elif RMSDReductions and (RMSDReductions[-1]", "3623-3648. doi:10.3390/ijms11103623. 
Args: M: the modes array defvec: the deformation vector MTM: dot", "self.getInitialGuess(previousBetas, Marray.shape[1]) Tapprox = np.dot(betas, Marray.T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox", "calculations continue # calculate betas try: betas = self.obtainLstSqBetasGeneralizedExpanding(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole,", "else: if previousBetas is not None: initialGuess = self.expandInitialGuess(previousBetas, M.shape[1]) betas, status =", "to how many modes the betas should be calculated, starting from 0 to", "stepPointsReduction guard = 0 for i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if i >", "\" skipped\" # return initialGuess # print \"modesToConsider, status: \", modesToConsider, status #", "mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) L_RMSD_after_Tapprox = self.getL_RMS(mob_chain_copy, ref_chain, self.utils.config.investigationsOn) deformationSnapshots[i]", "getModeArrayKeepingFirstK(self, arr, k): k += 1 k = range(0, k) arrCopy = arr.copy()", "fitter timeouts filePrefix: filePrefix, for output debugging if the RMSD fitter timeouts Returns:", "# else it is the first RMSD reduction run, store betas and RMSD", "= 0 for i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if i > self.utils.config.stopRMSDReductionAt: #", "stepPointsReduction, L_RMSReductions, deformationSnapshots def getL_RMS(self, proteinFrom, proteinTo, investigationsOn): \"\"\" Get the L_RMS of", "''' Constructor ''' self.utils = utils def setupMTMforBetas(self, anm): \"\"\" Calculate and return", "0\" currentOverlap = 0 return RMSD_after_Tapprox, currentOverlap, betas @timeout() def obtainLstSqBetas(self, anm, defvec,", "at i:\", i # raw_input() # continue if guard < self.utils.config.guard: # calculate", "betas = self.getInitialGuess(betasListWhole, i) Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1]) TapproxVector 
= Vector(Tapprox, \"Tapprox\") #", "calculated betas maxModes: the number of modes preconceived: has guard from config been", "results initial_L_RMS = self.getL_RMS(mob_chain, ref_chain, self.utils.config.investigationsOn) if L_RMSD_after_Tapprox < initial_L_RMS: L_RMSReductions.append(L_RMSD_after_Tapprox) else: L_RMSReductions.append(initial_L_RMS)", "structure nonTrivialModes = maxModes #(maxModes[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) if modesToConsider", "an ANM object ref_chain: The overall matched chain atoms from the unbound structure", "how many modes the betas should be calculated, starting from 0 to n-1", "low, increase the number maximalIter = self.utils.config.maxIterBetas if M.shape[1] == 1: betas, status", "calculate betas try: betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole, anm_slc, preconceived=True) except", "24, 2014 @author: oliwa ''' from prody.measure.measure import calcDeformVector import numpy as np", "# calculate betas Mmode = self.getModeArrayKeepingFirstK(M, i) betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i,", "\"\"\" Obtain betas by a scipy optimizer fitting, the formula is given in", "matched chain atoms from the bound structure defvec: the deformation vector stepPointsReduction: list", "investigationsON: \"Complex\" or \"Individual\" Returns: L_RMS of proteinFrom and proteinTo \"\"\" if investigationsOn", "initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2]", "ANM matrix inverse times the ANM matrix previousBetas: previously calculated betas Returns: the", "conlim=1000000000.0, maxiter=maximalIter)[0:2] if status != 0: print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes,", "np.dot(Mtrans, Marray) betasListWhole = [] 
stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, numModes, initialStep=1) print \"stepPointsReduction:", "\", np.shape(Tdefvec) # #print \"shape(M): \", np.shape(M) # if len(M) != len(Tdefvec): #", "M.T MTM = np.dot(Mtrans, M) betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, anm_slc[0].numModes())", "= self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i, betasListWhole, anm_slc, preconceived=True) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector", "= mob_chain.copy() for i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if i > self.utils.config.stopRMSDReductionAt or", "raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") Mtrans = M.T # the default", "RMSDReductions.append(RMSD_after_Tapprox) else: RMSDReductions.append(initial_RMSD) print \"first mode did not lower RMSD\" betasListWhole.append(betas) # calc", "of Molecular Sciences 11, no. 10 (September 28, 2010): 3623-3648. doi:10.3390/ijms11103623. Args: M:", "apply Tapprox to a copy of the bound structure and get the reduced", "the RMSD reduction should go preconceived # calculate betas try: betas = self.obtainLstSqBetas(anm_slc[0][0:i+1],", "= np.dot(Mtrans, Marray) betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, numModes, initialStep=1) print", "= lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2] print \"modesToConsider, status: \", modesToConsider,", "Marray) stepPointsReduction = stepPointsReduction - 1 # reduce every value by one to", "no. 10 (September 28, 2010): 3623-3648. # doi:10.3390/ijms11103623. 
# # Args: # anm:", "betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else: # else it is the first RMSD reduction run,", "deformation vector MTM: dot product of the ANM matrix inverse times the ANM", "corresponding entries of the eigenvectors towards the matched atoms ref_chain: The overall matched", "\"modesToConsider, status: \", M.shape[1], status return betas # def obtainLstSqBetasByCollectivity(self, M, defvec, MTMfull,", "RMSDReductions and (RMSDReductions[-1] == 1): # # we already reached a RMSD Rreduction", "calcRMSD(mob_chain_copy, ref_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]): # store betas and RMSD reduction results betasListWhole.append(betas)", "= calcRMSD(mob_chain_copy, ref_chain) L_RMSD_after_Tapprox = self.getL_RMS(mob_chain_copy, ref_chain, self.utils.config.investigationsOn) deformationSnapshots[i] = mob_chain_copy.copy() if RMSDReductions:", "return initialGuess print \"modesToConsider, status: \", modesToConsider, status return betas @timeout() def obtainLstSqBetasGeneralized2(self,", "# # Moal, <NAME>., and <NAME>. 
\"SwarmDock and the Use of Normal #", "from 0 to n-1 listofPreviousBetas: the list of previously calculated betas maxModes: the", "MTM, i, betasListWhole, numModes) except TimeoutError: print \"RMSD timeout at modes\", i,\"using previous", "protein which is being deformed towards defvec: the deformation vector from proteinFrom to", "timeout at modes \" +str(i)+\" using previous betas\\n \") betas = self.getInitialGuess(betasListWhole, i)", "RMSD = 1 at i:\", i # raw_input() # continue if guard <", "column\" # betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] # elif not preconceived:", "Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] else: betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2]", "RMSD reduction. Args: Marray: Array of normal modes, same shape as getArray from", "are combined in a linear combination with betas. Args: anm_slc: The sliced ANM,", "listofPreviousBetas, anmTuple, preconceived=False): \"\"\" Obtain betas by a scipy optimizer fitting, the formula", "timeout at modes\", Marray.shape[1],\" using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\"", "from prody.dynamics.mode import Vector from prody.measure.transform import calcRMSD from scipy.sparse.linalg import cg from", "to speedup other calculations continue if guard < self.utils.config.guard: # calculate betas ##", "proteinFrom, proteinTo, investigationsOn): \"\"\" Get the L_RMS of proteinFrom and proteinTo (they need", "already too close or the mode vectors are problematic\" RMSD_after_Tapprox = previousRMSD #", "problematic, returning overlap 0\" currentOverlap = 0 return RMSD_after_Tapprox, currentOverlap, betas @timeout() def", "stepPointsReduction def calcRMSDReductionsReverseGeneral(self, Marray, ref_chain, mob_chain, defvec, referenceName, 
filePrefix): \"\"\" Calculate a list", "by a scipy optimizer fitting, the formula is given in : Moal, <NAME>.,", "the ANM matrix inverse times the ANM matrix previousBetas: previously calculated betas Returns:", "# # we already reached a RMSD Rreduction of 1.0 # betasListWhole.append(betasListWhole[-1]) #", "betas by a scipy optimizer fitting, the formula is given in : Moal,", "modesToConsider, status return betas @timeout() def obtainLstSqBetasGeneralized2(self, M, defvec, MTM, previousBetas=None): \"\"\" Obtain", "__init__(self, utils): ''' Constructor ''' self.utils = utils def setupMTMforBetas(self, anm): \"\"\" Calculate", "status != 0: # print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"status ==", "defvec, previousBetas, previousOverlap, previousRMSD, referenceName, filePrefix): \"\"\" Calculate a list of RMSD reductions", "\"M: \", M for i in range(0, len(Mbefore)): M[i] = Mbefore[i] return M", "\"Tapprox\") # apply Tapprox to a copy of the bound structure and get", "< self.utils.config.guard: # calculate betas try: betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole,", "how many modes could be calculated on this structure # nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3)", "= maxModes #(maxModes[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes) if modesToConsider >", "dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsReverseGeneral(self, Marray, ref_chain,", "\"anm_slc[0].getArray(): \", anm_slc[0][0:2].getArray().shape RMSDReductions = [] overlap = [] MTM = self.setupMTMforBetas(anm_slc[0]) betasListWhole", "L_RMS of proteinFrom and proteinTo (they need to be chain matched). 
Args: proteinFrom:", "RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsExpandingSet(self, Marray, ref_chain, mob_chain,", "anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to a copy of the", "of the full ANM matrix inverse times # the ANM matrix # modesToConsider:", "# print \"first \",i,\" M: \", M for j in range(0, len(indicesOfHighest)): M", "currentOverlap = 0 overlap.append(currentOverlap) else: # else guard is >= self.utils.config.guard, and the", "listofPreviousBetas, anmTuple, preconceived=False): # \"\"\" Obtain betas by a scipy optimizer fitting, the", "excludeFirstK) M = self.getModeArrayBasedOnIndices(anm_slc[0], excludeFirstK, indicesOfHighest) defvec = calcDeformVector(ref_chain, mob_chain) RMSDReductions = []", "= self.getL_RMS(mob_chain_copy, ref_chain, self.utils.config.investigationsOn) deformationSnapshots[i] = mob_chain_copy.copy() if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]:", "if len(previousOverlap) == 0: previousOverlap = 0 else: previousOverlap = previousOverlap[-1] if len(previousRMSD)", "listofPreviousBetas, maxModes, preconceived=False): \"\"\" Obtain betas by a scipy optimizer fitting, the formula", "calcRMSDReductionsExpandingSet(self, Marray, ref_chain, mob_chain, defvec, stepPointsReduction, referenceName, filePrefix): \"\"\" Calculate a list of", "currentOverlap = 0 overlap.append(currentOverlap) else: # else the previous RMSD was actually lower,", "at modes\", i, \"using previous betas\" # with open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: #", "def obtainLstSqBetas(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, anmTuple, preconceived=False): \"\"\" Obtain betas by", "modes transposed times all ANM modes.\"\"\" M = anm.getArray() Mtrans = M.T MTM", "betas Mmode = self.getModeArrayKeepingFirstK(M, i) betas = 
self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i, betasListWhole, anm_slc,", "many modes could be calculated on this structure # nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) -", "modesToConsider, status elif not preconceived: initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) betas, status = cg(MTM,", "(anmTuple[1].select('calpha').numAtoms()*3) - 6 # initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) # if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined):", "if self.utils.config.stopRMSDReductionAt: if i > self.utils.config.stopRMSDReductionAt: # temporary, to speedup other calculations continue", "overlap.append(overlap[-1]) # print \"already reached RMSD = 1 at i:\", i # raw_input()", "maxModes: the number of modes preconceived: has guard from config been reached or", "dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsReverseGeneral(self, Marray, ref_chain, mob_chain, defvec, referenceName, filePrefix):", "Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) stepPointsReduction = stepPointsReduction - 1 #", "matched chains of the protein to deform towards proteinTo proteinTo: The overall matched", "initialGuess[:maxModesOverall] return initialGuess def getInitialGuess(self, listofPreviousBetas, modesToConsider): \"\"\" Create an initial guess vector,", "1 k = range(0, k) arrCopy = arr.copy() if len(k) == 1: Mbefore", "Vector(Tapprox, \"Tapprox\") # apply Tapprox to a copy of the bound structure and", "TimeoutError: print \"RMSD timeout at modes\", i,\"using previous betas\" # with open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\", \"a\")", "RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else: # else it is the first RMSD reduction run, no", "previously calculated betas Returns: the beta coefficents \"\"\" Tdefvec = defvec.getArray() if len(M)", "# defvec: the deformationvector # MTMfull: dot 
product of the full ANM matrix", "i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if i > self.utils.config.stopRMSDReductionAt or i > numModes:", "this structure nonTrivialModes = maxModes #(maxModes[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) if", "= defvec.getArray() if len(M) != len(Tdefvec): print \"len(M): \", M.shape print \"len(Tdefvec): \",", "= M.T MTM = np.dot(Mtrans, M) betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10,", "== \", status, \" skipped\" return initialGuess print \"modesToConsider, status: \", modesToConsider, status", "\") betas = self.getInitialGuess([0], Marray.shape[1]) Tapprox = np.dot(betas, Marray.T) TapproxVector = Vector(Tapprox, \"Tapprox\")", "np.array(RMSDReductions, dtype=np.float64) L_RMSReductions = np.array(L_RMSReductions, dtype=np.float64) deformationSnapshots[\"proteinTo\"] = ref_chain.copy() return RMSDReductions, overlap, stepPointsReduction,", "print \"modesToConsider, status: \", modesToConsider, status # else: # # how many modes", "mob_chain.copy() mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) if RMSDReductions: if RMSD_after_Tapprox <", "correct length \"\"\" initialGuess = listofPreviousBetas[-1] initialGuess = np.append(initialGuess, [x*0.0 for x in", "defvec, MTM, i, betasListWhole, anm_slc) except TimeoutError: print \"RMSD timeout at modes\", i,\"using", "modes could be calculated on this structure nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) - 6 initialGuess", "modesToConsider, status elif not preconceived: initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes) betas, status =", "0: # print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"status == \", status,", "[] Mtrans = M.T MTM = np.dot(Mtrans, M) 
betasListWhole = [] stepPointsReduction =", "Normal Modes in Protein-Protein Docking.\" International Journal of Molecular Sciences 11, no. 10", "open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at modes \" +str(i)+\" using previous", "proteinFrom.copy() proteinFrom_copy.setCoords(proteinFrom_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(proteinFrom_copy, proteinTo) # RMSD comparison if previousRMSD:", "== -0.0: # print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"det(MTM) == 0,", "it is an investigation on individual proteins, L_RMS does not apply, # return", "keep first k: \", M for i in range(1, len(k)): M = np.dstack((M,", "a numerical problem\" if overlap: overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap) else: #", "the deformation vector) investigationsON: \"Complex\" or \"Individual\" Returns: L_RMS of proteinFrom and proteinTo", "previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at modes \"", "# print \"modesToConsider, status: \", modesToConsider, status # else: # # how many", "vector referenceName: the name of the reference Returns: RMSDReductions: The reduction list of", "RMSD_after_Tapprox = calcRMSD(proteinFrom_copy, proteinTo) # RMSD comparison if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox): print \"RMSD_after_Tapprox", "elif not preconceived: # initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) # betas, status = cg(MTM,", "Marray: Array of normal modes, same shape as getArray from an ANM object", "be calculated, starting from 0 to n-1 listofPreviousBetas: the list of previously calculated", "modesToConsider+1)]) return initialGuess def expandInitialGuess(self, listofPreviousBetas, modesToConsider): 
\"\"\" Create an initial guess vector,", "it is the first RMSD reduction run, store betas and RMSD reduction results", "Marray.T MTM = np.dot(Mtrans, Marray) if len(previousBetas) == 0: previousBetas = [0] else:", "> self.utils.config.stopRMSDReductionAt: # temporary, to speedup other calculations continue # elif RMSDReductions and", "self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i, betasListWhole, anm_slc) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox,", "M[i] = Mbefore[i] return M elif len(arr[0]) == len(k): return arr else: M", "# we already reached a RMSD Rreduction of 1.0 # betasListWhole.append(betasListWhole[-1]) # RMSDReductions.append(RMSDReductions[-1])", "listofPreviousBetas: the list of previously calculated betas anmTuple: anm tuple as generated by", "\" skipped\" return initialGuess print \"modesToConsider, status: \", modesToConsider, status return betas @timeout()", "def calcRMSDReductions(self, anm_slc, ref_chain, mob_chain, defvec): \"\"\" Calculate a list of RMSD reductions", "timeout at modes\", i,\"using previous betas\" with open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD", "or np.linalg.det(MTM) == -0.0: # print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"det(MTM)", "# raw_input() # continue if guard < self.utils.config.guard: # calculate betas try: betas", "reached a RMSD Rreduction of 1.0 # betasListWhole.append(betasListWhole[-1]) # RMSDReductions.append(RMSDReductions[-1]) # overlap.append(overlap[-1]) #", "if L_RMSReductions: if L_RMSD_after_Tapprox < L_RMSReductions[-1]: L_RMSReductions.append(L_RMSD_after_Tapprox) else: print \"previous L_RMS lower at", "anm_slc, ref_chain, mob_chain): indicesOfHighest = self.utils.getIndiciesofHighestN(np.abs(collectivity), highestN, excludeFirstK) M = self.getModeArrayBasedOnIndices(anm_slc[0], excludeFirstK, 
indicesOfHighest)", "chain atoms from the bound structure defvec: the deformation vector Returns: RMSDReductions: The", "initialGuess = listofPreviousBetas[-1] initialGuess = np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider+1)]) return", "self.utils.getIndiciesofHighestN(np.abs(collectivity), highestN, excludeFirstK) M = self.getModeArrayBasedOnIndices(anm_slc[0], excludeFirstK, indicesOfHighest) defvec = calcDeformVector(ref_chain, mob_chain) RMSDReductions", "is >= self.utils.config.guard, and the RMSD reduction should go preconceived # calculate betas", "getArray from an ANM object proteinFrom: The overall matched chains of the protein", "status = lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2] if status != 0:", "reach the correct length \"\"\" initialGuess = listofPreviousBetas[-1] initialGuess = np.append(initialGuess, [x*0.0 for", "be calculated listofPreviousBetas: the list of previously calculated betas maxModes: the number of", "return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsReverse(self, anm_slc, ref_chain, mob_chain, defvec, referenceName, filePrefix): \"\"\"", "Marray.T[0:i+1]) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to a copy of the", "with open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: # myfile.write(referenceName+\" RMSD timeout at modes \" +str(i)+\"", "RMSD\" betasListWhole.append(betas) # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or np.isinf(currentOverlap):", "if len(previousRMSD) == 0: previousRMSD = calcRMSD(proteinFrom, proteinTo) else: previousRMSD = previousRMSD[-1] try:", "previousOverlap else: currentOverlap = 0 return RMSD_after_Tapprox, currentOverlap, betas def RMSDReductionFixedset(self, Marray, proteinFrom,", "np.array(RMSDReductions, 
dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def getModeArrayBasedOnIndices(self, anm_slc, excludeFirstK, indicesOfHighest): \"\"\" Create", "excludeFirstK, indicesOfHighest): \"\"\" Create an array of np.arrays with the modes specified by", "+str(i)+\" using previous betas\\n \") betas = self.getInitialGuessExpanding(betasListWhole, i, numModes) Tapprox = np.dot(betas[0:i+1],", "else the previous LRMS was actually lower, the beta calculation was not successful", "mob_chain_copy = mob_chain.copy() mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]):", "\" +str(Marray.shape[1])+\" using previous betas\\n \") betas = self.getInitialGuess([0], Marray.shape[1]) Tapprox = np.dot(betas,", "= calcRMSD(proteinFrom, proteinTo) # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or", "\"modesToConsider, status: \", modesToConsider, status # return betas def getInitialGuessExpanding(self, listofPreviousBetas, modesToConsider, maxModesOverall):", "range(0, k) arrCopy = arr.copy() if len(k) == 1: Mbefore = np.array(np.dstack(arrCopy)[0][0]) M", "we already reached a RMSD Rreduction of 1.0 # betasListWhole.append(betasListWhole[-1]) # RMSDReductions.append(RMSDReductions[-1]) #", "-0.0: # print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"det(MTM) == 0, skipped\"", "anm: the ANM with modes defvec: the deformationvector MTMfull: dot product of the", "lower L_RMS\" # cast objects overlap = np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64)", "cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] print \"modesToConsider, status: \", modesToConsider, status elif not preconceived:", "= arr.copy() if len(k) == 1: Mbefore = np.array(np.dstack(arrCopy)[0][0]) M = 
np.zeros((len(Mbefore), 1))", "referenceName, filePrefix): \"\"\" One shot calculation for the RMSD reduction. Args: Marray: Array", "print \"modesToConsider, status: \", modesToConsider, status else: # how many modes could be", "the initial guess for the fitter previousOverlap: The previous overlap previousRMSD: The previous", "= 0 overlap.append(currentOverlap) if L_RMSReductions: if L_RMSD_after_Tapprox < L_RMSReductions[-1]: L_RMSReductions.append(L_RMSD_after_Tapprox) else: print \"previous", "RMSD mob_chain_copy = mob_chain.copy() mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) L_RMSD_after_Tapprox =", "change from mob_chain to ref_chain Args: anm_slc: The sliced ANM, with the corresponding", "reductions based increasing number of modes, that are combined in a linear combination", "Array of normal modes, same shape as getArray from an ANM object ref_chain:", "from an ANM object proteinFrom: The overall matched chains of the protein to", "< self.utils.config.guard: # calculate betas ## new Mmode instead of anm_slc and then", "linear combination with betas. RMSD change from mob_chain to ref_chain Args: Marray: Array", "length. 
Args: listofPreviousBetas: the list of previously calculated Betas modesToConsider: up to how", "\"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"status == \", status, \" skipped\" return", "[] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, anm_slc[0].numModes()) guard = 0 for i in stepPointsReduction:", "is the first L_RMSD reduction run, store L_RMS reduction results initial_L_RMS = self.getL_RMS(mob_chain,", "RMSD timeout at modes \" +str(i)+\" using previous betas\\n \") betas = self.getInitialGuess(betasListWhole,", "* stepPointsReduction[0]] deformationSnapshots = OrderedDict() deformationSnapshots[\"proteinFrom\"] = mob_chain.copy() for i in stepPointsReduction: if", "vector Returns: RMSDReductions: The reduction list of obtained RMSD values \"\"\" RMSDReductions =", "np.dot(betas, Marray.T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to a copy of", "print \"modesToConsider, status: \", modesToConsider, status return betas @timeout() def obtainLstSqBetasGeneralized2(self, M, defvec,", "unbound structure and get the reduced RMSD ref_chain_copy = ref_chain.copy() ref_chain_copy.setCoords(ref_chain_copy.getCoords() + TapproxVector.getArrayNx3())", "the protein which is being deformed towards previousBetas: The previous betas, serves as", "name of the reference, for output debugging if the RMSD fitter timeouts filePrefix:", "should be calculated # # Returns: # the beta coefficents # \"\"\" #", "# calculate betas ## new Mmode instead of anm_slc and then [][] Mmode", "proteinTo, defvec, referenceName, filePrefix): \"\"\" One shot calculation for the RMSD reduction. 
Args:", "initialGuess print \"modesToConsider, status: \", modesToConsider, status return betas @timeout() def obtainLstSqBetasGeneralizedExpanding(self, anm,", "actually lower, the beta calculation was not successful guard += 1 betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1])", "RMSD_after_Tapprox < RMSDReductions[-1]: # store betas and RMSD reduction results betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox) #", "anm tuple as generated by Prody preconceived: has guard from config been reached", "maxiter is too low, increase the number maximalIter = self.utils.config.maxIterBetas if M.shape[1] ==", "calculation was not successful guard += 1 betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else: # else", "the ANM matrix previousBetas: previously calculated betas Returns: the beta coefficents \"\"\" Tdefvec", "arrCopy = arr.copy() if len(k) == 1: Mbefore = np.array(np.dstack(arrCopy)[0][0]) M = np.zeros((len(Mbefore),", "= np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsExpandingSet(self, Marray, ref_chain, mob_chain, defvec,", "coefficents \"\"\" M = anm Tdefvec = defvec.getArray() if len(M) != len(Tdefvec): print", "maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # if status != 0: # print \"modesToConsider, nonTrivialModes, status: \",", "given by the indices in indicesOfHighest\"\"\" excludeFirstK = range(0, excludeFirstK) M = anm_slc[excludeFirstK[0]].getArray()", "calculated on this structure nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider)", "calculation for the RMSD reduction. Args: Marray: Array of normal modes, same shape", "initial guess vector, padded with 0.0 values to the correct length. 
Args: listofPreviousBetas:", "not apply, # return RMSD of individual proteins instead return calcRMSD(proteinFrom, proteinTo) def", "reduction run, no need to compare against previous RMSD # store betas and", "self.utils.config.stopRMSDReductionAt: if i > self.utils.config.stopRMSDReductionAt or i > numModes: # temporary, to speedup", "RMSD change from mob_chain to ref_chain Args: anm_slc: The sliced ANM, with the", "the first L_RMSD reduction run, store L_RMS reduction results initial_L_RMS = self.getL_RMS(mob_chain, ref_chain,", "reference, for output debugging if the RMSD fitter timeouts filePrefix: filePrefix, for output", "combined in a linear combination with betas. RMSD change from mob_chain to ref_chain", "timeout at modes \" +str(Marray.shape[1])+\" using previous betas\\n \") betas = self.getInitialGuess([0], Marray.shape[1])", "list of obtained RMSD values \"\"\" RMSDReductions = [] overlap = [] MTM", "results initial_RMSD = calcRMSD(mob_chain, ref_chain) if RMSD_after_Tapprox < initial_RMSD: RMSDReductions.append(RMSD_after_Tapprox) else: RMSDReductions.append(initial_RMSD) print", "-0.0: print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"det(MTM) == 0, skipped\" return", "modesToConsider < 1: # print \"using one column\" # betas, status = cg(MTM,", "obtainLstSqBetasGeneralized2(self, M, defvec, MTM, previousBetas=None): \"\"\" Obtain betas by a scipy optimizer fitting,", "calculation was not successful betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) # cast objects overlap = np.array(overlap,", "the first RMSD reduction run, no need to compare against previous RMSD #", "specified by the indices in excludeFirstK, and the following modes as given by", "= cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # print \"modesToConsider, status: \", modesToConsider,", "else: previousOverlap = 
previousOverlap[-1] if len(previousRMSD) == 0: previousRMSD = calcRMSD(proteinFrom, proteinTo) else:", "betas\\n \") betas = self.getInitialGuess(previousBetas, Marray.shape[1]) Tapprox = np.dot(betas, Marray.T) TapproxVector = Vector(Tapprox,", "1): # # we already reached a RMSD Rreduction of 1.0 # betasListWhole.append(betasListWhole[-1])", "\", modesToConsider, status # return betas def getInitialGuessExpanding(self, listofPreviousBetas, modesToConsider, maxModesOverall): \"\"\" Create", "= np.dot(Mtrans, Marray) try: betas = self.obtainLstSqBetasGeneralized2(Marray, defvec, MTM) except TimeoutError: print \"RMSD", "previousBetas = previousBetas[-1] if len(previousOverlap) == 0: previousOverlap = 0 else: previousOverlap =", "print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"status == \", status, \" skipped\"", "using previous betas\\n \") betas = self.getInitialGuess(betasListWhole, i) Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1]) TapproxVector", "\"original MTM, np.dot(Mtrans, Tdefvec) \", MTM, np.dot(Mtrans, Tdefvec) betas, status = cg(MTM, np.dot(Mtrans,", "j in range(0, len(indicesOfHighest)): M = np.dstack((M, anm_slc[indicesOfHighest[j]].getArray())) # print \"highe \",j,\" M:", "= [] Mtrans = M.T MTM = np.dot(Mtrans, M) betasListWhole = [] stepPointsReduction", "RMSD values \"\"\" print \"anm_slc[0].getArray(): \", anm_slc[0][0:2].getArray().shape RMSDReductions = [] overlap = []", "the dot product of all ANM modes transposed times all ANM modes.\"\"\" M", "status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] else: betas, status = cg(MTM,", "Marray, proteinFrom, proteinTo, defvec, referenceName, filePrefix): \"\"\" One shot calculation for the RMSD", "no need to compare against previous RMSD # store betas and RMSD reduction", "betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) # 
cast objects overlap = np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions,", "to successively calculate the RMSD reductions on referenceName: the name of the reference,", "new Mmode instead of anm_slc and then [][] Mmode = self.getModeArrayKeepingFirstK(M, i) print", "status # else: # # how many modes could be calculated on this", "get the reduced RMSD mob_chain_copy = mob_chain.copy() mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy,", "else: currentOverlap = 0 return RMSD_after_Tapprox, currentOverlap, betas def RMSDReductionFixedset(self, Marray, proteinFrom, proteinTo,", "for the betas, padded with 0.0 to reach the correct length \"\"\" initialGuess", "# print \"highe \",j,\" M: \", M return M[0] def getModeArrayKeepingFirstK(self, arr, k):", "10 (September 28, 2010): 3623-3648. doi:10.3390/ijms11103623. Args: M: the modes array defvec: the", "values to the correct length. Args: listofPreviousBetas: the list of previously calculated Betas", "\"\"\" Get the L_RMS of proteinFrom and proteinTo (they need to be chain", "filePrefix: filePrefix, for output debugging if the RMSD fitter timeouts Returns: RMSDReduction, overlap,", "= np.dot(Mtrans, Marray) stepPointsReduction = stepPointsReduction - 1 # reduce every value by", "betas = self.obtainLstSqBetasGeneralizedExpanding(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes) except TimeoutError: print \"RMSD timeout", "successful betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) # cast objects overlap = np.array(overlap, dtype=np.float64) RMSDReductions =", "vectors are problematic, returning original RMSD\" RMSD_after_Tapprox = calcRMSD(proteinFrom, proteinTo) # calc overlap", "maximalIter = self.utils.config.maxIterBetas if modesToConsider < 1: #print \"original MTM, np.dot(Mtrans, Tdefvec) \",", "nonTrivialModes, \"status == \", status, \" 
skipped\" # return initialGuess # print \"modesToConsider,", "Docking.\" International Journal of # Molecular Sciences 11, no. 10 (September 28, 2010):", "from timeout import timeout from timeout import TimeoutError from collections import OrderedDict class", "referenceName, filePrefix): \"\"\" Calculate a list of RMSD reductions based increasing number of", "RMSD values \"\"\" RMSDReductions = [] overlap = [] MTM = self.setupMTMforBetas(anm_slc[0]) betasListWhole", "previous betas\\n \") betas = self.getInitialGuess(betasListWhole, i) Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1]) TapproxVector =", "to compare against previous RMSD # store betas and RMSD reduction results betasListWhole.append(betas)", "Returns: RMSDReduction, overlap, betas \"\"\" Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) if", "= listofPreviousBetas[-1] initialGuess = np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider+1)]) return initialGuess", "Marray.shape[1]) Tapprox = np.dot(betas, Marray.T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to", "get the RMSD towards proteinTo proteinFrom_copy = proteinFrom.copy() proteinFrom_copy.setCoords(proteinFrom_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox =", "to reach the correct length \"\"\" initialGuess = listofPreviousBetas[-1] initialGuess = np.append(initialGuess, [x*0.0", "< 1: # print \"using one column\" # betas, status = cg(MTM, np.dot(Mtrans,", "of modes preconceived: has guard from config been reached or not Returns: the", "cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] else: if previousBetas is not None: initialGuess = self.expandInitialGuess(previousBetas,", "a copy of the unbound structure and get the reduced RMSD ref_chain_copy =", "np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # print \"modesToConsider, status: \", modesToConsider, status #", "np.isinf(currentOverlap): print \"overlap has 
a numerical problem\" currentOverlap = 0 overlap.append(currentOverlap) if L_RMSReductions:", "in range(1, len(excludeFirstK)): M = np.dstack((M, anm_slc[excludeFirstK[i]].getArray())) # print \"first \",i,\" M: \",", "np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider+1)]) if len(initialGuess) > maxModesOverall: initialGuess =", "scipy optimizer fitting, the formula is given in : Moal, <NAME>., and <NAME>.", "def obtainLstSqBetasGeneral(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, maxModes, preconceived=False): \"\"\" Obtain betas by", "all ANM modes.\"\"\" M = anm.getArray() Mtrans = M.T MTM = np.dot(Mtrans, M)", "np.dot(Mtrans, Marray) try: betas = self.obtainLstSqBetasGeneralized2(Marray, defvec, MTM) except TimeoutError: print \"RMSD timeout", "Marray.shape[1] #MTM = self.setupMTMforBetas(anm_slc[0]) Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) betasListWhole =", "doi:10.3390/ijms11103623. Args: M: the modes array defvec: the deformation vector MTM: dot product", "calculate the RMSD reductions on referenceName: the name of the reference, for output", "excludeFirstK, and the following modes as given by the indices in indicesOfHighest\"\"\" excludeFirstK", "< initial_L_RMS: L_RMSReductions.append(L_RMSD_after_Tapprox) else: L_RMSReductions.append(initial_L_RMS) print \"first mode did not lower L_RMS\" #", "anm_slc, ref_chain, mob_chain, defvec, referenceName, filePrefix): \"\"\" Calculate a list of RMSD reductions", "previous RMSD was actually lower, the beta calculation was not successful guard +=", "> numModes: # temporary, to speedup other calculations continue # calculate betas try:", "did not lower RMSD\" betasListWhole.append(betas) # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if", "proteinFrom.copy() proteinFrom_copy.setCoords(proteinFrom_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(proteinFrom_copy, proteinTo) # RMSD comparison if 
np.isnan(RMSD_after_Tapprox)", "should go preconceived # calculate betas try: betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i,", "len(Tdefvec): # raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") # Mtrans = M.T", "list of obtained RMSD values \"\"\" #print \"Marray: \", Marray[0:2] RMSDReductions = []", "MTM = np.dot(Mtrans, Marray) if len(previousBetas) == 0: previousBetas = [0] else: previousBetas", "!= len(Tdefvec): # raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") # Mtrans =", "filePrefix: filePrefix, for output debugging if the RMSD fitter timeouts Returns: RMSDReduction \"\"\"", "atoms from the bound structure defvec: the deformation vector stepPointsReduction: list of number", "np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # betas, status = lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting,", "# apply Tapprox to a copy of proteinFrom and get the RMSD towards", "of proteinFrom and proteinTo (they need to be chain matched). 
Args: proteinFrom: Deformed", "self.getModeArrayKeepingFirstK(M, i) print \"Mmode: \", np.shape(Mmode) betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i, betasListWhole,", "the modes specified by the indices in excludeFirstK, and the following modes as", "if len(M) != len(Tdefvec): # raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") #", "else: # else it is the first RMSD reduction run, store betas and", "listofPreviousBetas: the list of previously calculated Betas modesToConsider: up to how many modes", "vector MTM: dot product of the ANM matrix inverse times the ANM matrix", "len(Tdefvec)\") Mtrans = M.T MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM maximalIter =", "with the modes specified by the indices in excludeFirstK, and the following modes", "previous overlap previousRMSD: The previous reduced RMSD defvec: the deformation vector from proteinFrom", "doi:10.3390/ijms11103623. # # Args: # anm: the ANM with modes # defvec: the", "or the mode vectors are problematic\" RMSD_after_Tapprox = previousRMSD # calc overlap currentOverlap", "- 1 # reduce every value by one to have the index match", "of number of modes to successively calculate the RMSD reductions on referenceName: the", "n-1 listofPreviousBetas: the list of previously calculated betas maxModes: the number of modes", "initialGuess print \"modesToConsider, status: \", modesToConsider, status return betas @timeout() def obtainLstSqBetasGeneralized2(self, M,", "the betas should be calculated # # Returns: # the beta coefficents #", "Returns: the beta coefficents \"\"\" Tdefvec = defvec.getArray() if len(M) != len(Tdefvec): print", "has a numerical problem\" currentOverlap = 0 overlap.append(currentOverlap) if L_RMSReductions: if L_RMSD_after_Tapprox <", "vectors are problematic\" RMSD_after_Tapprox = previousRMSD # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec)", "list of obtained RMSD values \"\"\" print 
\"anm_slc[0].getArray(): \", anm_slc[0][0:2].getArray().shape RMSDReductions = []", "vector from proteinFrom to proteinTo referenceName: the name of the reference, for output", "self.getInitialGuess(listofPreviousBetas, modesToConsider) # if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): # if np.linalg.det(MTM) == 0.0 or", "Normal # Modes in Protein-Protein Docking.\" International Journal of # Molecular Sciences 11,", "return betas # def obtainLstSqBetasByCollectivity(self, M, defvec, MTMfull, modesToConsider, listofPreviousBetas, anmTuple, preconceived=False): #", "are already too close or the mode vectors are problematic, returning original RMSD\"", "with open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at modes \" +str(i)+\" using", "MTM, i, betasListWhole, anm_slc, preconceived=True) except TimeoutError: print \"RMSD timeout at modes\", i,", "defvec.getArray() # #print \"shape(Tdefvec): \", np.shape(Tdefvec) # #print \"shape(M): \", np.shape(M) # if", "print \"RMSD timeout at modes\", Marray.shape[1],\" using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as", "= np.dstack(arrCopy)[0][0] #print \"first M in keep first k: \", M for i", "the reduced RMSD ref_chain_copy = ref_chain.copy() ref_chain_copy.setCoords(ref_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain)", "calcRMSD(ref_chain_copy, mob_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]): # store betas and RMSD reduction results betasListWhole.append(betas)", "matched chain atoms from the unbound structure mob_chain: The overall matched chain atoms", "chains of the protein which is being deformed towards previousBetas: The previous betas,", "previousRMSD = previousRMSD[-1] try: betas = self.obtainLstSqBetasGeneralized2(Marray, defvec, MTM) 
except TimeoutError: print \"RMSD", "conlim=1000000000.0, maxiter=maximalIter)[0:2] print \"modesToConsider, status: \", modesToConsider, status else: # how many modes", "\", np.shape(M) # if len(M) != len(Tdefvec): # raise ValueError(\"Cannot calculate betas, len(M)", "Mmode = self.getModeArrayKeepingFirstK(M, i) betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i, betasListWhole, anm_slc, preconceived=True)", "lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2] if status != 0: print \"modesToConsider,", "go preconceived # calculate betas Mmode = self.getModeArrayKeepingFirstK(M, i) betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec,", "== 0, skipped\" # return initialGuess # betas, status = cg(MTM, np.dot(Mtrans, Tdefvec),", "RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsReverse(self, anm_slc, ref_chain, mob_chain, defvec, referenceName, filePrefix): \"\"\" Calculate", "Mtrans = M.T # MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM # maximalIter", "with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at modes \" +str(Marray.shape[1])+\" using", "modes array defvec: the deformation vector MTM: dot product of the ANM matrix", "from the bound structure defvec: the deformation vector Returns: RMSDReductions: The reduction list", "ref_chain.copy() ref_chain_copy.setCoords(ref_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]): # store", "the formula is given in : Moal, <NAME>., and <NAME>. 
\"SwarmDock and the", "with modes defvec: the deformationvector MTMfull: dot product of the full ANM matrix", "highestN, excludeFirstK, anm_slc, ref_chain, mob_chain): indicesOfHighest = self.utils.getIndiciesofHighestN(np.abs(collectivity), highestN, excludeFirstK) M = self.getModeArrayBasedOnIndices(anm_slc[0],", "\", i # else the previous LRMS was actually lower, the beta calculation", "for the RMSD reduction. Args: Marray: Array of normal modes, same shape as", "[][] Mmode = self.getModeArrayKeepingFirstK(M, i) print \"Mmode: \", np.shape(Mmode) betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec,", "timeout at modes\", i,\"using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD", "= [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, anm_slc[0].numModes()) guard = 0 for i in", "= self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes) except TimeoutError: print \"RMSD timeout at", "= 0 return RMSD_after_Tapprox, currentOverlap, betas def RMSDReductionFixedset(self, Marray, proteinFrom, proteinTo, defvec, referenceName,", "to the correct length. Args: listofPreviousBetas: the list of previously calculated Betas modesToConsider:", "if guard < self.utils.config.guard: # calculate betas try: betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM,", "modes preconceived: has guard from config been reached or not Returns: the beta", "in : # # Moal, <NAME>., and <NAME>. 
\"SwarmDock and the Use of", "defvec, MTM, previousBetas=None): \"\"\" Obtain betas by a scipy optimizer fitting, the formula", "TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain) if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]: # store", "ANM object proteinFrom: The overall matched chains of the protein to deform towards", "# apply Tapprox to a copy of the unbound structure and get the", "does not apply, # return RMSD of individual proteins instead return calcRMSD(proteinFrom, proteinTo)", "np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def getModeArrayBasedOnIndices(self, anm_slc,", "Mtrans = M.T MTM = np.dot(Mtrans, M) return MTM def calcRMSDReductions(self, anm_slc, ref_chain,", "the reference, for output debugging if the RMSD fitter timeouts filePrefix: filePrefix, for", "mob_chain.copy() for i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if i > self.utils.config.stopRMSDReductionAt or i", "i in range(1, len(k)): M = np.dstack((M, np.dstack(arrCopy)[0][i])) #print \"M in keep first", "k += 1 k = range(0, k) arrCopy = arr.copy() if len(k) ==", "M.shape[1], status return betas # def obtainLstSqBetasByCollectivity(self, M, defvec, MTMfull, modesToConsider, listofPreviousBetas, anmTuple,", "Tdefvec), maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] print \"modesToConsider, status: \", M.shape[1], status return betas # def", "is given in : # # Moal, <NAME>., and <NAME>. 
\"SwarmDock and the", "i > self.utils.config.stopRMSDReductionAt: # temporary, to speedup other calculations continue # elif RMSDReductions", "print \"RMSD timeout at modes\", i, \"using previous betas\" with open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as", "raw_input() # continue if guard < self.utils.config.guard: # calculate betas try: betas =", "= np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider)]) return initialGuess def calcRMSDReductionsAidedByCollectivity(self, collectivity,", "the betas, padded with 0.0 to reach the correct length \"\"\" initialGuess =", "# elif RMSDReductions and (RMSDReductions[-1] == 1): # # we already reached a", "= [] numModes = Marray.shape[1] #MTM = self.setupMTMforBetas(anm_slc[0]) Mtrans = Marray.T MTM =", "of Molecular Sciences 11, no. 10 (September 28, 2010): 3623-3648. doi:10.3390/ijms11103623. Args: anm:", "for output debugging if the RMSD fitter timeouts Returns: RMSDReduction, overlap, betas \"\"\"", "number of modes, that are combined in a linear combination with betas. 
Args:", "np.isinf(RMSD_after_Tapprox): print \"RMSD_after_Tapprox has a numerical problem, maybe the two structures are already", "not Returns: the beta coefficents \"\"\" M = anm.getArray() #print \"first M original:", "with 0.0 to reach the correct length \"\"\" initialGuess = listofPreviousBetas[-1] initialGuess =", "### M = anm.getArray() # # Tdefvec = defvec.getArray() # #print \"shape(Tdefvec): \",", "the mode vectors are problematic\" if previousOverlap: currentOverlap = previousOverlap else: currentOverlap =", "padded with 0.0 to reach the correct length \"\"\" initialGuess = listofPreviousBetas initialGuess", "preconceived: initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess,", "The overall matched chain atoms from the unbound structure mob_chain: The overall matched", "# print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"det(MTM) == 0, skipped\" #", "one column\" # betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] # elif not", "for i in range(1, len(excludeFirstK)): M = np.dstack((M, anm_slc[excludeFirstK[i]].getArray())) # print \"first \",i,\"", "from prody.measure.measure import calcDeformVector import numpy as np from prody.dynamics.compare import calcOverlap from", "Sciences 11, no. 10 (September 28, 2010): 3623-3648. # doi:10.3390/ijms11103623. 
# # Args:", "overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap) else: # else the previous RMSD was", "mob_chain to ref_chain Args: Marray: Array of normal modes, same shape as getArray", "initialGuess = np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider+1)]) if len(initialGuess) > maxModesOverall:", "of 1.0 # betasListWhole.append(betasListWhole[-1]) # RMSDReductions.append(RMSDReductions[-1]) # overlap.append(overlap[-1]) # print \"already reached RMSD", "betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole, anm_slc, preconceived=True) except TimeoutError: print \"RMSD", "numerical problem\" if overlap: overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap) else: print \"previous", "Marray.shape[1],\" using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at", "TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to a copy of the bound", "= calcRMSD(ref_chain_copy, mob_chain) if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]: # store betas and", "at modes \" +str(i)+\" using previous betas\\n \") betas = self.getInitialGuessExpanding(betasListWhole, i, numModes)", "# RMSD comparison if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox): print \"RMSD_after_Tapprox has a numerical problem,", "+str(Marray.shape[1])+\" using previous betas\\n \") betas = self.getInitialGuess(previousBetas, Marray.shape[1]) Tapprox = np.dot(betas, Marray.T)", "= Marray.T MTM = np.dot(Mtrans, Marray) if len(previousBetas) == 0: previousBetas = [0]", "guard = 0 for i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if i > self.utils.config.stopRMSDReductionAt:", "the previous LRMS was actually lower, the beta calculation was not successful L_RMSReductions.append(L_RMSReductions[-1])", "debugging if the RMSD 
fitter timeouts filePrefix: filePrefix, for output debugging if the", "calcRMSD(proteinFrom_copy, proteinTo) # RMSD comparison if previousRMSD: if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox) or previousRMSD", "\"using previous betas\" # with open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: # myfile.write(referenceName+\" RMSD timeout", "The overall matched chains of the protein which is being deformed towards defvec:", "[] overlap = [] MTM = self.setupMTMforBetas(anm_slc[0]) betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10,", "\"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at modes \" +str(Marray.shape[1])+\" using previous betas\\n", "betas @timeout() def obtainLstSqBetasGeneralized2(self, M, defvec, MTM, previousBetas=None): \"\"\" Obtain betas by a", "filePrefix, for output debugging if the RMSD fitter timeouts Returns: RMSDReduction, overlap, betas", "= np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider+1)]) if len(initialGuess) > maxModesOverall: initialGuess", "debugging purposes filePrefix: file prefix, for output debugging purposes Returns: RMSDReductions: The reduction", "previous betas\\n \") betas = self.getInitialGuess([0], Marray.shape[1]) Tapprox = np.dot(betas, Marray.T) TapproxVector =", "= np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsExpandingSet(self,", "return initialGuess # betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] #", "numModes, initialStep=1) print \"stepPointsReduction: \", stepPointsReduction guard = 0 for i in stepPointsReduction:", "elif len(arr[0]) == len(k): return arr else: M = np.dstack(arrCopy)[0][0] #print \"first M", "of the ANM matrix inverse times the ANM matrix previousBetas: 
previously calculated betas", "MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM maximalIter = self.utils.config.maxIterBetas if modesToConsider <", "# ### M = anm.getArray() # # Tdefvec = defvec.getArray() # #print \"shape(Tdefvec):", "Molecular Sciences 11, no. 10 (September 28, 2010): 3623-3648. doi:10.3390/ijms11103623. Args: anm: the", "self.getInitialGuess(betasListWhole, i) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox", "inverse times the ANM matrix modesToConsider: up to how many modes the betas", "have the index match the range 0 to n-1 print stepPointsReduction betasListWhole =", "(September 28, 2010): 3623-3648. # doi:10.3390/ijms11103623. # # Args: # anm: the ANM", "and the RMSD reduction should go preconceived # calculate betas Mmode = self.getModeArrayKeepingFirstK(M,", "linear combination with betas. RMSD change from mob_chain to ref_chain Args: anm_slc: The", "nonTrivialModes, \"status == \", status, \" skipped\" return initialGuess print \"modesToConsider, status: \",", "return RMSDReductions, overlap, stepPointsReduction, L_RMSReductions, deformationSnapshots def getL_RMS(self, proteinFrom, proteinTo, investigationsOn): \"\"\" Get", "sliced ANM, with the corresponding entries of the eigenvectors towards the matched atoms", "for output debugging if the RMSD fitter timeouts Returns: RMSDReduction \"\"\" Mtrans =", "\"\"\" Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) try: betas = self.obtainLstSqBetasGeneralized2(Marray, defvec,", "guard = 0 else: print \"previous RMSD lower at \", i # else", "betas should be calculated listofPreviousBetas: the list of previously calculated betas maxModes: the", "in range(0, len(indicesOfHighest)): M = np.dstack((M, anm_slc[indicesOfHighest[j]].getArray())) # print \"highe \",j,\" M: \",", "= defvec.getArray() #print \"shape(Tdefvec): \", np.shape(Tdefvec) #print \"shape(M): \", np.shape(M) if len(M) !=", "# 
betas, status = lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2] if status", "overlap.append(currentOverlap) else: print \"previous RMSD lower at \", i # else the previous", "for i in range(0, len(Mbefore)): M[i] = Mbefore[i] return M elif len(arr[0]) ==", "ref_chain, mob_chain, defvec, referenceName, filePrefix): \"\"\" Calculate a list of RMSD reductions based", "self.getInitialGuess([0], Marray.shape[1]) Tapprox = np.dot(betas, Marray.T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox", "calculation was not successful L_RMSReductions.append(L_RMSReductions[-1]) else: # else it is the first L_RMSD", "L_RMSReductions.append(L_RMSReductions[-1]) else: # else it is the first L_RMSD reduction run, store L_RMS", "np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider)]) return initialGuess def calcRMSDReductionsAidedByCollectivity(self, collectivity, highestN,", "previousBetas=None): \"\"\" Obtain betas by a scipy optimizer fitting, the formula is given", "maxiter=maximalIter)[0:2] if status != 0: print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"status", "times # the ANM matrix # modesToConsider: up to how many modes the", "np.shape(Tdefvec) # #print \"shape(M): \", np.shape(M) # if len(M) != len(Tdefvec): # raise", "self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole, anm_slc, preconceived=True) except TimeoutError: print \"RMSD timeout at", "\"\"\" Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) if len(previousBetas) == 0: previousBetas", "except TimeoutError: print \"RMSD timeout at modes\", Marray.shape[1],\" using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\",", "overlap.append(overlap[-1]) else: # else it is the first RMSD reduction run, no need", "self.utils.config.guard: # calculate 
betas ## new Mmode instead of anm_slc and then [][]", "starting from 0 to n-1 listofPreviousBetas: the list of previously calculated betas maxModes:", "# return initialGuess # print \"modesToConsider, status: \", modesToConsider, status # return betas", "fitting, the formula is given in : Moal, <NAME>., and <NAME>. \"SwarmDock and", "0 return RMSD_after_Tapprox, currentOverlap, betas def RMSDReductionFixedset(self, Marray, proteinFrom, proteinTo, defvec, referenceName, filePrefix):", "dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) L_RMSReductions = np.array(L_RMSReductions, dtype=np.float64) deformationSnapshots[\"proteinTo\"] = ref_chain.copy() return", "MTMfull: dot product of the full ANM matrix inverse times the ANM matrix", "calculated listofPreviousBetas: the list of previously calculated betas maxModes: the number of modes", "0.0 or np.linalg.det(MTM) == -0.0: print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"det(MTM)", "maxModes #(maxModes[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): if", "np.shape(Tdefvec) #print \"shape(M): \", np.shape(M) if len(M) != len(Tdefvec): raise ValueError(\"Cannot calculate betas,", "\" +str(Marray.shape[1])+\" using previous betas\\n \") betas = self.getInitialGuess(previousBetas, Marray.shape[1]) Tapprox = np.dot(betas,", "\"\"\" M = anm Tdefvec = defvec.getArray() #print \"shape(Tdefvec): \", np.shape(Tdefvec) #print \"shape(M):", "= cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] else: if previousBetas is not None: initialGuess =", "def getModeArrayBasedOnIndices(self, anm_slc, excludeFirstK, indicesOfHighest): \"\"\" Create an array of np.arrays with the", "betas @timeout() def obtainLstSqBetasGeneral(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, maxModes, preconceived=False): \"\"\" 
Obtain", "M: the modes array defvec: the deformation vector MTM: dot product of the", "Returns: L_RMS of proteinFrom and proteinTo \"\"\" if investigationsOn == \"Complex\": proteinFromL =", "Prody preconceived: has guard from config been reached or not Returns: the beta", "beta coefficents # \"\"\" # ### old # ### M = anm.getArray() #", "of the reference, for output debugging purposes filePrefix: file prefix, for output debugging", "vector, padded with 0.0 values to the correct length. Args: listofPreviousBetas: the list", "of the initial guess for the fitter previousOverlap: The previous overlap previousRMSD: The", "def RMSDReductionFixedset(self, Marray, proteinFrom, proteinTo, defvec, referenceName, filePrefix): \"\"\" One shot calculation for", "calcOverlap from prody.dynamics.mode import Vector from prody.measure.transform import calcRMSD from scipy.sparse.linalg import cg", "[] numModes = Marray.shape[1] Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) stepPointsReduction =", "= proteinFrom.copy() proteinFrom_copy.setCoords(proteinFrom_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(proteinFrom_copy, proteinTo) # RMSD comparison if", "preconceived=True) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to", "Obtain betas by a scipy optimizer fitting, the formula is given in :", "RMSD lower at \", i # else the previous RMSD was actually lower,", "ref_chain) if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]: # store betas and RMSD reduction", "+ TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) L_RMSD_after_Tapprox = self.getL_RMS(mob_chain_copy, ref_chain, self.utils.config.investigationsOn) deformationSnapshots[i] =", "at \", i # else the previous RMSD was actually lower, the beta", "The reduction list of obtained RMSD values \"\"\" RMSDReductions = [] L_RMSReductions =", "= np.array(RMSDReductions, 
dtype=np.float64) L_RMSReductions = np.array(L_RMSReductions, dtype=np.float64) deformationSnapshots[\"proteinTo\"] = ref_chain.copy() return RMSDReductions, overlap,", "i:\", i # raw_input() # continue if guard < self.utils.config.guard: # calculate betas", "0: previousRMSD = calcRMSD(proteinFrom, proteinTo) else: previousRMSD = previousRMSD[-1] try: betas = self.obtainLstSqBetasGeneralized2(Marray,", "6 initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): if np.linalg.det(MTM) ==", "getL_RMS(self, proteinFrom, proteinTo, investigationsOn): \"\"\" Get the L_RMS of proteinFrom and proteinTo (they", "# elif not preconceived: # initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) # betas, status =", "tol=self.utils.config.precisionBetaFitting)[0:2] # print \"modesToConsider, status: \", modesToConsider, status # else: # # how", "\"already reached RMSD = 1 at i:\", i # raw_input() # continue if", "TimeoutError: print \"RMSD timeout at modes\", i, \"using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\")", "timeouts filePrefix: filePrefix, for output debugging if the RMSD fitter timeouts Returns: RMSDReduction,", "# overlap.append(overlap[-1]) # print \"already reached RMSD = 1 at i:\", i #", "structures are already too close or the mode vectors are problematic, returning overlap", "list of obtained RMSD values \"\"\" RMSDReductions = [] L_RMSReductions = [] overlap", "= np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsReverseGeneral(self,", "betas @timeout() def obtainLstSqBetasGeneralizedExpanding(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, maxModes, preconceived=False): \"\"\" Obtain", "previous betas\\n \") betas = 
self.getInitialGuessExpanding(betasListWhole, i, numModes) Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1]) TapproxVector", "problematic\" RMSD_after_Tapprox = previousRMSD # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap)", "in Protein-Protein Docking.\" International Journal of Molecular Sciences 11, no. 10 (September 28,", "betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i, betasListWhole, anm_slc, preconceived=True) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T)", "The initial guess vector for the betas, padded with 0.0 to reach the", "guess vector, padded with 0.0 values to the correct length. Args: listofPreviousBetas: the", "this structure nonTrivialModes = maxModes #(maxModes[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes)", "# Args: # anm: the ANM with modes # defvec: the deformationvector #", "previously calculated Betas modesToConsider: up to how many modes are given to get", "\"\"\" #print \"Marray: \", Marray[0:2] RMSDReductions = [] overlap = [] numModes =", "# Moal, <NAME>., and <NAME>. 
\"SwarmDock and the Use of Normal # Modes", "initial guess vector for the betas, padded with 0.0 to reach the correct", "if the RMSD fitter timeouts filePrefix: filePrefix, for output debugging if the RMSD", "on this structure nonTrivialModes = maxModes #(maxModes[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider,", "# temporary, to speedup other calculations continue # elif RMSDReductions and (RMSDReductions[-1] ==", "file prefix, for output debugging purposes Returns: RMSDReductions: The reduction list of obtained", "dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsReverse(self, anm_slc, ref_chain,", "by one to have the index match the range 0 to n-1 print", "has a numerical problem\" currentOverlap = 0 overlap.append(currentOverlap) else: # else guard is", "number of modes preconceived: has guard from config been reached or not Returns:", "from collections import OrderedDict class RMSDReducer(object): ''' The RMSDReducer contains method to reduce", "= calcRMSD(mob_chain_copy, ref_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]): # store betas and RMSD reduction results", "proteinFrom_copy.setCoords(proteinFrom_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(proteinFrom_copy, proteinTo) # RMSD comparison if previousRMSD: if", "the RMSD reduction. 
Args: Marray: Array of normal modes, same shape as getArray", "betas\" # with open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: # myfile.write(referenceName+\" RMSD timeout at modes", "= 0 overlap.append(currentOverlap) else: # else the previous RMSD was actually lower, the", "the deformationvector MTMfull: dot product of the full ANM matrix inverse times the", "maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # betas, status = lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2]", "preconceived=False): # \"\"\" Obtain betas by a scipy optimizer fitting, the formula is", "0, skipped\" # return initialGuess # betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess,", "using previous betas\\n \") betas = self.getInitialGuess(betasListWhole, i) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector", "two structures are already too close or the mode vectors are problematic\" if", "overlap, betas \"\"\" Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) if len(previousBetas) ==", "be calculated listofPreviousBetas: the list of previously calculated betas anmTuple: anm tuple as", "reduction list of obtained RMSD values \"\"\" RMSDReductions = [] L_RMSReductions = []", "\", M for i in range(0, len(Mbefore)): M[i] = Mbefore[i] return M elif", "the reference Returns: RMSDReductions: The reduction list of obtained RMSD values \"\"\" print", "elif not preconceived: initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes) betas, status = cg(MTM, np.dot(Mtrans,", "i, \"using previous betas\" # with open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: # myfile.write(referenceName+\" RMSD", "initialGuess print \"modesToConsider, status: \", modesToConsider, status return betas @timeout() def 
obtainLstSqBetasGeneral(self, anm,", "np from prody.dynamics.compare import calcOverlap from prody.dynamics.mode import Vector from prody.measure.transform import calcRMSD", "calculate betas ## new Mmode instead of anm_slc and then [][] Mmode =", "calcRMSDReductionsAidedByCollectivity(self, collectivity, highestN, excludeFirstK, anm_slc, ref_chain, mob_chain): indicesOfHighest = self.utils.getIndiciesofHighestN(np.abs(collectivity), highestN, excludeFirstK) M", "\", modesToConsider, status elif not preconceived: initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) betas, status =", "# return RMSD of individual proteins instead return calcRMSD(proteinFrom, proteinTo) def calcRMSDReductionFromTo(self, Marray,", "getInitialGuessExpanding(self, listofPreviousBetas, modesToConsider, maxModesOverall): \"\"\" Create an initial guess vector, padded with 0.0", "print \"first \",i,\" M: \", M for j in range(0, len(indicesOfHighest)): M =", "a RMSD Rreduction of 1.0 # betasListWhole.append(betasListWhole[-1]) # RMSDReductions.append(RMSDReductions[-1]) # overlap.append(overlap[-1]) # print", "not preconceived: initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess,", "a numerical problem\" if overlap: overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap) else: print", "obtainLstSqBetasGeneral(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, maxModes, preconceived=False): \"\"\" Obtain betas by a", "obtained RMSD values \"\"\" RMSDReductions = [] L_RMSReductions = [] overlap = []", "!= len(Tdefvec): print \"len(M): \", M.shape print \"len(Tdefvec): \", len(Tdefvec) raise ValueError(\"Cannot calculate", "arr else: M = np.dstack(arrCopy)[0][0] #print \"first M in keep first k: \",", "status: \", modesToConsider, nonTrivialModes, \"status == \", status, \" skipped\" # return initialGuess", "= np.array(overlap, dtype=np.float64) 
RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsReverse(self,", "## new Mmode instead of anm_slc and then [][] Mmode = self.getModeArrayKeepingFirstK(M, i)", "or previousRMSD < RMSD_after_Tapprox: print \"RMSD_after_Tapprox has a numerical problem, maybe the two", "Sciences 11, no. 10 (September 28, 2010): 3623-3648. doi:10.3390/ijms11103623. Args: anm: the ANM", "problem\" if overlap: overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap) else: print \"previous RMSD", "is not None: initialGuess = self.expandInitialGuess(previousBetas, M.shape[1]) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec),", "# ### old # ### M = anm.getArray() # # Tdefvec = defvec.getArray()", "it is the first L_RMSD reduction run, store L_RMS reduction results initial_L_RMS =", "modes \" +str(Marray.shape[1])+\" using previous betas\\n \") betas = self.getInitialGuess([0], Marray.shape[1]) Tapprox =", "calcRMSDReductionFromTo(self, Marray, proteinFrom, proteinTo, defvec, previousBetas, previousOverlap, previousRMSD, referenceName, filePrefix): \"\"\" Calculate a", "\", M.shape print \"len(Tdefvec): \", len(Tdefvec) raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\")", "np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsReverse(self, anm_slc,", "of the deformation vector) investigationsON: \"Complex\" or \"Individual\" Returns: L_RMS of proteinFrom and", "= np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsReverseGeneral(self, Marray, ref_chain, mob_chain, defvec,", "M original: \", M Tdefvec = defvec.getArray() #print \"shape(Tdefvec): \", np.shape(Tdefvec) #print \"shape(M):", "modes\", i,\"using previous betas\" # with 
open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: # myfile.write(referenceName+\" RMSD", "\"\"\" M = anm.getArray() #print \"first M original: \", M Tdefvec = defvec.getArray()", "# return betas def getInitialGuessExpanding(self, listofPreviousBetas, modesToConsider, maxModesOverall): \"\"\" Create an initial guess", "betas and RMSD reduction results initial_RMSD = calcRMSD(mob_chain, ref_chain) if RMSD_after_Tapprox < initial_RMSD:", "initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter,", "on this structure nonTrivialModes = maxModes #(maxModes[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider)", "import cg from timeout import timeout from timeout import TimeoutError from collections import", "and get the RMSD towards proteinTo proteinFrom_copy = proteinFrom.copy() proteinFrom_copy.setCoords(proteinFrom_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox", "mode vectors are problematic\" RMSD_after_Tapprox = previousRMSD # calc overlap currentOverlap = calcOverlap(TapproxVector,", "list of previously calculated betas maxModes: the number of modes preconceived: has guard", "currentOverlap, betas def RMSDReductionFixedset(self, Marray, proteinFrom, proteinTo, defvec, referenceName, filePrefix): \"\"\" One shot", "Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10,", "betasListWhole, anm_slc) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox", "combination with betas. Args: anm_slc: The sliced ANM, with the corresponding entries of", "to reduce the RMSD between proteins. 
''' def __init__(self, utils): ''' Constructor '''", "of RMSD reductions based increasing number of modes, that are combined in a", "deform towards proteinTo proteinTo: The overall matched chains of the protein which is", "len(M) != len(Tdefvec): print \"len(M): \", M.shape print \"len(Tdefvec): \", len(Tdefvec) raise ValueError(\"Cannot", "open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: # myfile.write(referenceName+\" RMSD timeout at modes \" +str(i)+\" using", "try: betas = self.obtainLstSqBetasGeneralizedExpanding(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes) except TimeoutError: print \"RMSD", "problematic\" if previousOverlap: currentOverlap = previousOverlap else: currentOverlap = 0 return RMSD_after_Tapprox, currentOverlap,", "initial_L_RMS: L_RMSReductions.append(L_RMSD_after_Tapprox) else: L_RMSReductions.append(initial_L_RMS) print \"first mode did not lower L_RMS\" # cast", "= np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider+1)]) return initialGuess def expandInitialGuess(self, listofPreviousBetas,", "proteinFrom: Deformed protein proteinTo: Target protein (target of the deformation vector) investigationsON: \"Complex\"", "RMSDReductions[-1]): # store betas and RMSD reduction results betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox) # calc overlap", "= self.getInitialGuess(listofPreviousBetas, modesToConsider) # betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2]", "reduction run, store L_RMS reduction results initial_L_RMS = self.getL_RMS(mob_chain, ref_chain, self.utils.config.investigationsOn) if L_RMSD_after_Tapprox", "\"using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at modes", "= lsmr(MTM, np.dot(Mtrans, Tdefvec), 
atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2] if status != 0: print", "if guard < self.utils.config.guard: # calculate betas try: betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM,", "which is being deformed towards previousBetas: The previous betas, serves as part of", "import OrderedDict class RMSDReducer(object): ''' The RMSDReducer contains method to reduce the RMSD", "if len(M) != len(Tdefvec): print \"len(M): \", M.shape print \"len(Tdefvec): \", len(Tdefvec) raise", "maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # print \"modesToConsider, status: \", modesToConsider, status # else: # #", "\"Tapprox\") # apply Tapprox to a copy of proteinFrom and get the RMSD", "of anm_slc and then [][] Mmode = self.getModeArrayKeepingFirstK(M, i) print \"Mmode: \", np.shape(Mmode)", "maxiter=maximalIter)[0:2] # elif not preconceived: # initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) # betas, status", "\"initial M: \", M for i in range(1, len(excludeFirstK)): M = np.dstack((M, anm_slc[excludeFirstK[i]].getArray()))", "calcRMSDReductionsReverse(self, anm_slc, ref_chain, mob_chain, defvec, referenceName, filePrefix): \"\"\" Calculate a list of RMSD", "TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to a copy of proteinFrom and", "len(previousRMSD) == 0: previousRMSD = calcRMSD(proteinFrom, proteinTo) else: previousRMSD = previousRMSD[-1] try: betas", "== -0.0: print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"det(MTM) == 0, skipped\"", "by Prody preconceived: has guard from config been reached or not Returns: the", "class RMSDReducer(object): ''' The RMSDReducer contains method to reduce the RMSD between proteins.", "anm_slc) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to", "M = anm.getArray() 
#print \"first M original: \", M Tdefvec = defvec.getArray() #print", "Tdefvec = defvec.getArray() # #print \"shape(Tdefvec): \", np.shape(Tdefvec) # #print \"shape(M): \", np.shape(M)", "a numerical problem, maybe the two structures are already too close or the", "2010): 3623-3648. doi:10.3390/ijms11103623. Args: anm: the ANM with modes defvec: the deformationvector MTMfull:", "Created on Jan 24, 2014 @author: oliwa ''' from prody.measure.measure import calcDeformVector import", "given in : # # Moal, <NAME>., and <NAME>. \"SwarmDock and the Use", "matched chains of the protein which is being deformed towards defvec: the deformation", "modesToConsider) # betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # print", "if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) == -0.0: print", "def calcRMSDReductionsAidedByCollectivity(self, collectivity, highestN, excludeFirstK, anm_slc, ref_chain, mob_chain): indicesOfHighest = self.utils.getIndiciesofHighestN(np.abs(collectivity), highestN, excludeFirstK)", "# raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") # Mtrans = M.T #", "the fitter previousOverlap: The previous overlap previousRMSD: The previous reduced RMSD defvec: the", "initial_RMSD = calcRMSD(mob_chain, ref_chain) if RMSD_after_Tapprox < initial_RMSD: RMSDReductions.append(RMSD_after_Tapprox) else: RMSDReductions.append(initial_RMSD) print \"first", "as np from prody.dynamics.compare import calcOverlap from prody.dynamics.mode import Vector from prody.measure.transform import", "= mob_chain.copy() mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) if RMSDReductions: if RMSD_after_Tapprox", "= mob_chain.copy() mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) 
RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) L_RMSD_after_Tapprox = self.getL_RMS(mob_chain_copy, ref_chain,", "i, betasListWhole, anm_slc, preconceived=True) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox, \"Tapprox\") #", "modesToConsider < 1: #print \"original MTM, np.dot(Mtrans, Tdefvec) \", MTM, np.dot(Mtrans, Tdefvec) betas,", "the previous RMSD was actually lower, the beta calculation was not successful guard", "betas, status = lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2] if status !=", "setupMTMforBetas(self, anm): \"\"\" Calculate and return the dot product of all ANM modes", "given to get the betas Returns: The initial guess vector for the betas,", "collections import OrderedDict class RMSDReducer(object): ''' The RMSDReducer contains method to reduce the", "= np.dstack((M, anm_slc[excludeFirstK[i]].getArray())) # print \"first \",i,\" M: \", M for j in", "TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to a copy of the unbound", "RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain) if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]: # store betas", "previous LRMS was actually lower, the beta calculation was not successful L_RMSReductions.append(L_RMSReductions[-1]) else:", "initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) # if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): # if np.linalg.det(MTM) ==", "the list of previously calculated betas anmTuple: anm tuple as generated by Prody", "RMSDReduction, overlap, betas \"\"\" Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) if len(previousBetas)", "\") betas = self.getInitialGuess(betasListWhole, i) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox, \"Tapprox\")", 
"open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at modes \" +str(Marray.shape[1])+\" using previous", "initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): if np.linalg.det(MTM) == 0.0", "close or the mode vectors are problematic, returning overlap 0\" currentOverlap = 0", "on this structure # nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) - 6 # initialGuess = self.getInitialGuess(listofPreviousBetas,", "matrix inverse times the ANM matrix modesToConsider: up to how many modes the", "M elif len(arr[0]) == len(k): return arr else: M = np.dstack(arrCopy)[0][0] #print \"first", "defvec, MTM) except TimeoutError: print \"RMSD timeout at modes\", Marray.shape[1],\" using previous betas\"", "print \"overlap has a numerical problem\" currentOverlap = 0 overlap.append(currentOverlap) else: # else", "anm Tdefvec = defvec.getArray() if len(M) != len(Tdefvec): print \"len(M): \", M.shape print", "modes\", Marray.shape[1],\" using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout", "status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] print \"modesToConsider, status: \", modesToConsider, status elif", "self.getInitialGuess(listofPreviousBetas, modesToConsider) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # betas,", "# else: # # how many modes could be calculated on this structure", "if self.utils.config.stopRMSDReductionAt: if i > self.utils.config.stopRMSDReductionAt or i > numModes: # temporary, to", "RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]): # 
store betas and RMSD reduction", "ANM matrix inverse times # the ANM matrix # modesToConsider: up to how", "calcRMSD(proteinFrom, proteinTo) # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or np.isinf(currentOverlap):", "Mmode = self.getModeArrayKeepingFirstK(M, i) print \"Mmode: \", np.shape(Mmode) betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM,", "L_RMSReductions, deformationSnapshots def getL_RMS(self, proteinFrom, proteinTo, investigationsOn): \"\"\" Get the L_RMS of proteinFrom", "status = lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2] print \"modesToConsider, status: \",", "calcRMSD(mob_chain_copy, ref_chain) if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]: # store betas and RMSD", "lower, the beta calculation was not successful betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else: # else", "previous RMSD # store betas and RMSD reduction results betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox) # calc", "value by one to have the index match the range 0 to n-1", "cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # betas, status = lsmr(MTM, np.dot(Mtrans, Tdefvec),", "Journal of # Molecular Sciences 11, no. 10 (September 28, 2010): 3623-3648. #", "= calcDeformVector(ref_chain, mob_chain) RMSDReductions = [] overlap = [] Mtrans = M.T MTM", "\"shape(Tdefvec): \", np.shape(Tdefvec) #print \"shape(M): \", np.shape(M) if len(M) != len(Tdefvec): raise ValueError(\"Cannot", "dot product of the full ANM matrix inverse times # the ANM matrix", "apply Tapprox to a copy of the unbound structure and get the reduced", "of modes, that are combined in a linear combination with betas. 
RMSD change", "(RMSDReductions[-1] == 1): # # we already reached a RMSD Rreduction of 1.0", "modes \" +str(Marray.shape[1])+\" using previous betas\\n \") betas = self.getInitialGuess(previousBetas, Marray.shape[1]) Tapprox =", "= ref_chain.copy() return RMSDReductions, overlap, stepPointsReduction, L_RMSReductions, deformationSnapshots def getL_RMS(self, proteinFrom, proteinTo, investigationsOn):", "len(initialGuess) > maxModesOverall: initialGuess = initialGuess[:maxModesOverall] return initialGuess def getInitialGuess(self, listofPreviousBetas, modesToConsider): \"\"\"", "# continue if guard < self.utils.config.guard: # calculate betas try: betas = self.obtainLstSqBetas(anm_slc[0][0:i+1],", "from scipy.sparse.linalg import cg from timeout import timeout from timeout import TimeoutError from", "# anm: the ANM with modes # defvec: the deformationvector # MTMfull: dot", "previousOverlap = 0 else: previousOverlap = previousOverlap[-1] if len(previousRMSD) == 0: previousRMSD =", "np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # if status != 0: # print \"modesToConsider,", "with modes # defvec: the deformationvector # MTMfull: dot product of the full", "reduction results betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox) # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap)", "\"first M in keep first k: \", M for i in range(1, len(k)):", "Marray) betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, numModes, initialStep=1) print \"stepPointsReduction: \",", "for j in range(0, len(indicesOfHighest)): M = np.dstack((M, anm_slc[indicesOfHighest[j]].getArray())) # print \"highe \",j,\"", "and the Use of Normal Modes in Protein-Protein Docking.\" International Journal of Molecular", "modes\", i, \"using previous betas\" with open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as 
myfile: myfile.write(referenceName+\" RMSD timeout", "= Marray.shape[1] Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) stepPointsReduction = stepPointsReduction -", "mob_chain): indicesOfHighest = self.utils.getIndiciesofHighestN(np.abs(collectivity), highestN, excludeFirstK) M = self.getModeArrayBasedOnIndices(anm_slc[0], excludeFirstK, indicesOfHighest) defvec =", "return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsExpandingSet(self, Marray, ref_chain, mob_chain, defvec, stepPointsReduction, referenceName, filePrefix):", "reduction should go preconceived # calculate betas try: betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM,", "obtained RMSD values \"\"\" #print \"Marray: \", Marray[0:2] RMSDReductions = [] overlap =", "deformationvector # MTMfull: dot product of the full ANM matrix inverse times #", "= proteinTo.select('segment \\\"L.\\\"') return calcRMSD(proteinFromL, proteinToL) else: # else it is an investigation", "M = np.dstack(arrCopy)[0][0] #print \"first M in keep first k: \", M for", "\", modesToConsider, status else: # how many modes could be calculated on this", "how many modes the betas should be calculated # # Returns: # the", "= np.array(np.dstack(arrCopy)[0][0]) M = np.zeros((len(Mbefore), 1)) #print \"M: \", M for i in", "betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes) except TimeoutError: print \"RMSD timeout", "be calculated on this structure # nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) - 6 # initialGuess", "RMSD reduction should go preconceived # calculate betas try: betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec,", "M: \", M for j in range(0, len(indicesOfHighest)): M = np.dstack((M, anm_slc[indicesOfHighest[j]].getArray())) #", "nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"status == \", status, \" skipped\" # return", "prefix, for output debugging purposes Returns: RMSDReductions: The reduction list of 
obtained RMSD", "self.utils = utils def setupMTMforBetas(self, anm): \"\"\" Calculate and return the dot product", "times the ANM matrix modesToConsider: up to how many modes the betas should", "Use of Normal Modes in Protein-Protein Docking.\" International Journal of Molecular Sciences 11,", "first L_RMSD reduction run, store L_RMS reduction results initial_L_RMS = self.getL_RMS(mob_chain, ref_chain, self.utils.config.investigationsOn)", "import numpy as np from prody.dynamics.compare import calcOverlap from prody.dynamics.mode import Vector from", "reference, for output debugging purposes filePrefix: file prefix, for output debugging purposes Returns:", "return the dot product of all ANM modes transposed times all ANM modes.\"\"\"", "# the beta coefficents # \"\"\" # ### old # ### M =", "= listofPreviousBetas[-1] initialGuess = np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider+1)]) if len(initialGuess)", "\"stepPointsReduction: \", stepPointsReduction guard = 0 for i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if", "ref_chain) if RMSD_after_Tapprox < initial_RMSD: RMSDReductions.append(RMSD_after_Tapprox) else: RMSDReductions.append(initial_RMSD) print \"first mode did not", "from mob_chain to ref_chain Args: anm_slc: The sliced ANM, with the corresponding entries", "+str(Marray.shape[1])+\" using previous betas\\n \") betas = self.getInitialGuess([0], Marray.shape[1]) Tapprox = np.dot(betas, Marray.T)", "return arr else: M = np.dstack(arrCopy)[0][0] #print \"first M in keep first k:", "listofPreviousBetas initialGuess = np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider)]) return initialGuess def", "self.utils.config.maxIterBetas # # if modesToConsider < 1: # print \"using one column\" #", "MTM, np.dot(Mtrans, Tdefvec) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] print \"modesToConsider, status:", "= initialGuess[:maxModesOverall] return initialGuess 
def getInitialGuess(self, listofPreviousBetas, modesToConsider): \"\"\" Create an initial guess", "the modes array defvec: the deformation vector MTM: dot product of the ANM", "ANM matrix modesToConsider: up to how many modes the betas should be calculated,", "modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) == -0.0: print \"modesToConsider,", "if L_RMSD_after_Tapprox < L_RMSReductions[-1]: L_RMSReductions.append(L_RMSD_after_Tapprox) else: print \"previous L_RMS lower at \", i", "individual proteins, L_RMS does not apply, # return RMSD of individual proteins instead", "return initialGuess def expandInitialGuess(self, listofPreviousBetas, modesToConsider): \"\"\" Create an initial guess vector, padded", "# Tdefvec = defvec.getArray() # #print \"shape(Tdefvec): \", np.shape(Tdefvec) # #print \"shape(M): \",", "beta calculation was not successful guard += 1 betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else: #", "# if modesToConsider < 1: # print \"using one column\" # betas, status", "proteinFrom_copy.setCoords(proteinFrom_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(proteinFrom_copy, proteinTo) # RMSD comparison if np.isnan(RMSD_after_Tapprox) or", "the reference Returns: RMSDReductions: The reduction list of obtained RMSD values \"\"\" #print", "self.expandInitialGuess(previousBetas, M.shape[1]) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] else: betas,", "+= 1 k = range(0, k) arrCopy = arr.copy() if len(k) == 1:", "proteinFrom to proteinTo referenceName: the name of the reference, for output debugging if", "guard < self.utils.config.guard: # calculate betas try: betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i,", "np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, 
tol=self.utils.config.precisionBetaFitting)[0:2] else: betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter,", "mob_chain: The overall matched chain atoms from the bound structure defvec: the deformation", "overlap.append(currentOverlap) else: # else guard is >= self.utils.config.guard, and the RMSD reduction should", "previousOverlap[-1] if len(previousRMSD) == 0: previousRMSD = calcRMSD(proteinFrom, proteinTo) else: previousRMSD = previousRMSD[-1]", "# def obtainLstSqBetasByCollectivity(self, M, defvec, MTMfull, modesToConsider, listofPreviousBetas, anmTuple, preconceived=False): # \"\"\" Obtain", "== 0, skipped\" return initialGuess betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter,", "RMSDReductions = [] overlap = [] numModes = Marray.shape[1] #MTM = self.setupMTMforBetas(anm_slc[0]) Mtrans", "maximalIter = self.utils.config.maxIterBetas if M.shape[1] == 1: betas, status = cg(MTM, np.dot(Mtrans, Tdefvec),", "elif not preconceived: initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec),", "maxModes) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) == -0.0:", "i, betasListWhole, numModes, preconceived=True) except TimeoutError: print \"RMSD timeout at modes\", i, \"using", "currentOverlap = 0 overlap.append(currentOverlap) else: print \"previous RMSD lower at \", i #", "against previous RMSD # store betas and RMSD reduction results betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox) #", "maximalIter = self.utils.config.maxIterBetas # # if modesToConsider < 1: # print \"using one", "output debugging if the RMSD fitter timeouts filePrefix: filePrefix, for output debugging if", "= self.getInitialGuess(listofPreviousBetas, modesToConsider) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): if np.linalg.det(MTM) == 0.0 or 
np.linalg.det(MTM)", "\"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"det(MTM) == 0, skipped\" # return initialGuess", "overlap, stepPointsReduction, L_RMSReductions, deformationSnapshots def getL_RMS(self, proteinFrom, proteinTo, investigationsOn): \"\"\" Get the L_RMS", "RMSDReductions: The reduction list of obtained RMSD values \"\"\" RMSDReductions = [] L_RMSReductions", "previousBetas: The previous betas, serves as part of the initial guess for the", "status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # if status != 0:", "that are combined in a linear combination with betas. Args: anm_slc: The sliced", "modes, that are combined in a linear combination with betas. Args: anm_slc: The", "continue if guard < self.utils.config.guard: # calculate betas try: betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec,", "or the mode vectors are problematic, returning original RMSD\" RMSD_after_Tapprox = calcRMSD(proteinFrom, proteinTo)", "of Normal # Modes in Protein-Protein Docking.\" International Journal of # Molecular Sciences", "structure defvec: the deformation vector Returns: RMSDReductions: The reduction list of obtained RMSD", "betas @timeout() def obtainLstSqBetas(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, anmTuple, preconceived=False): \"\"\" Obtain", "use pre-calculated MTM # maximalIter = self.utils.config.maxIterBetas # # if modesToConsider < 1:", "if np.isnan(currentOverlap) or np.isinf(currentOverlap): print \"overlap has a numerical problem\" if overlap: overlap.append(overlap[-1])", "previous betas\\n \") betas = self.getInitialGuess(previousBetas, Marray.shape[1]) Tapprox = np.dot(betas, Marray.T) TapproxVector =", "betasListWhole, anm_slc, preconceived=True) except TimeoutError: print \"RMSD timeout at modes\", i, \"using previous", "successful guard += 1 betasListWhole.append(betasListWhole[-1]) 
RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else: # else it is the", "RMSD reduction results initial_RMSD = calcRMSD(mob_chain, ref_chain) if RMSD_after_Tapprox < initial_RMSD: RMSDReductions.append(RMSD_after_Tapprox) else:", "\"\"\" Create an array of np.arrays with the modes specified by the indices", "range(1, len(excludeFirstK)): M = np.dstack((M, anm_slc[excludeFirstK[i]].getArray())) # print \"first \",i,\" M: \", M", "self.utils.config.maxIterBetas if M.shape[1] == 1: betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] else:", "RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]): # store betas and RMSD reduction", "mode vectors are problematic, returning overlap 0\" currentOverlap = 0 return RMSD_after_Tapprox, currentOverlap,", "increase the number maximalIter = self.utils.config.maxIterBetas if M.shape[1] == 1: betas, status =", "else: previousBetas = previousBetas[-1] if len(previousOverlap) == 0: previousOverlap = 0 else: previousOverlap", "debugging if the RMSD fitter timeouts Returns: RMSDReduction \"\"\" Mtrans = Marray.T MTM", "betas anmTuple: anm tuple as generated by Prody preconceived: has guard from config", "Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to a", "np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) == -0.0: # print \"modesToConsider, nonTrivialModes, status: \",", "has guard from config been reached or not Returns: the beta coefficents \"\"\"", "stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, anm_slc[0].numModes()) guard = 0 for i in stepPointsReduction: if", "in a linear combination with betas. 
Args: anm_slc: The sliced ANM, with the", "= Vector(Tapprox, \"Tapprox\") # apply Tapprox to a copy of the bound structure", "[] overlap = [] numModes = Marray.shape[1] Mtrans = Marray.T MTM = np.dot(Mtrans,", "run, store betas and RMSD reduction results initial_RMSD = calcRMSD(mob_chain, ref_chain) if RMSD_after_Tapprox", "overlap.append(overlap[-1]) # cast objects overlap = np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return", "= [] overlap = [] numModes = Marray.shape[1] #MTM = self.setupMTMforBetas(anm_slc[0]) Mtrans =", "0 overlap.append(currentOverlap) else: # else the previous RMSD was actually lower, the beta", "= cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] else: betas, status = cg(MTM, np.dot(Mtrans,", "indicesOfHighest): \"\"\" Create an array of np.arrays with the modes specified by the", "for i in range(1, len(k)): M = np.dstack((M, np.dstack(arrCopy)[0][i])) #print \"M in keep", "run, no need to compare against previous RMSD # store betas and RMSD", "mob_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]): # store betas and RMSD reduction results betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox)", "print \"modesToConsider, status: \", modesToConsider, status elif not preconceived: initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider,", "\"shape(Tdefvec): \", np.shape(Tdefvec) # #print \"shape(M): \", np.shape(M) # if len(M) != len(Tdefvec):", "problem\" currentOverlap = 0 overlap.append(currentOverlap) else: # else guard is >= self.utils.config.guard, and", "M) betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, anm_slc[0].numModes()) guard = 0 for", "if previousRMSD: if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox) or previousRMSD < RMSD_after_Tapprox: print \"RMSD_after_Tapprox has", "the RMSD 
towards proteinTo proteinFrom_copy = proteinFrom.copy() proteinFrom_copy.setCoords(proteinFrom_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(proteinFrom_copy,", "#MTM = self.setupMTMforBetas(anm_slc[0]) Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) betasListWhole = []", "\", M.shape[1], status return betas # def obtainLstSqBetasByCollectivity(self, M, defvec, MTMfull, modesToConsider, listofPreviousBetas,", "self.utils.config.stopRMSDReductionAt: if i > self.utils.config.stopRMSDReductionAt: # temporary, to speedup other calculations continue #", "config been reached or not Returns: the beta coefficents \"\"\" M = anm.getArray()", "anm_slc[indicesOfHighest[j]].getArray())) # print \"highe \",j,\" M: \", M return M[0] def getModeArrayKeepingFirstK(self, arr,", "betas\\n \") betas = self.getInitialGuessExpanding(betasListWhole, i, numModes) Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1]) TapproxVector =", "[] overlap = [] Mtrans = M.T MTM = np.dot(Mtrans, M) betasListWhole =", "betas Returns: the beta coefficents \"\"\" Tdefvec = defvec.getArray() if len(M) != len(Tdefvec):", "go preconceived # calculate betas try: betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole,", "scipy optimizer fitting, the formula is given in : # # Moal, <NAME>.,", "# how many modes could be calculated on this structure # nonTrivialModes =", "return calcRMSD(proteinFromL, proteinToL) else: # else it is an investigation on individual proteins,", "= 0 overlap.append(currentOverlap) guard = 0 else: print \"previous RMSD lower at \",", "len(M) != len(Tdefvec)\") Mtrans = M.T MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM", "debugging purposes Returns: RMSDReductions: The reduction list of obtained RMSD values \"\"\" RMSDReductions", "beta coefficents \"\"\" Tdefvec = defvec.getArray() if len(M) != len(Tdefvec): print \"len(M): \",", "Vector from prody.measure.transform import calcRMSD from 
scipy.sparse.linalg import cg from timeout import timeout", "TimeoutError: print \"RMSD timeout at modes\", i, \"using previous betas\" # with open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\",", "\"RMSD timeout at modes\", Marray.shape[1],\" using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile:", "M = anm Tdefvec = defvec.getArray() #print \"shape(Tdefvec): \", np.shape(Tdefvec) #print \"shape(M): \",", "status: \", modesToConsider, status else: # how many modes could be calculated on", "previousOverlap = previousOverlap[-1] if len(previousRMSD) == 0: previousRMSD = calcRMSD(proteinFrom, proteinTo) else: previousRMSD", "previous RMSD was actually lower, the beta calculation was not successful betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1])", "copy of the bound structure and get the reduced RMSD mob_chain_copy = mob_chain.copy()", "the RMSD fitter timeouts Returns: RMSDReduction \"\"\" Mtrans = Marray.T MTM = np.dot(Mtrans,", "= self.utils.getRMSDReductionStepPoints(10, 10, numModes, initialStep=1) print \"stepPointsReduction: \", stepPointsReduction guard = 0 for", "the correct length. 
Args: listofPreviousBetas: the list of previously calculated Betas modesToConsider: up", "to how many modes the betas should be calculated listofPreviousBetas: the list of", "np.isinf(currentOverlap): print \"overlap has a numerical problem\" if overlap: overlap.append(overlap[-1]) else: currentOverlap =", "return RMSDReductions, overlap, stepPointsReduction def getModeArrayBasedOnIndices(self, anm_slc, excludeFirstK, indicesOfHighest): \"\"\" Create an array", "stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if i > self.utils.config.stopRMSDReductionAt: # temporary, to speedup other calculations", "if i > self.utils.config.stopRMSDReductionAt: # temporary, to speedup other calculations continue # elif", "transposed times all ANM modes.\"\"\" M = anm.getArray() Mtrans = M.T MTM =", "TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) L_RMSD_after_Tapprox = self.getL_RMS(mob_chain_copy, ref_chain, self.utils.config.investigationsOn) deformationSnapshots[i] = mob_chain_copy.copy()", "The overall matched chain atoms from the bound structure defvec: the deformation vector", "an investigation on individual proteins, L_RMS does not apply, # return RMSD of", "timeouts Returns: RMSDReduction \"\"\" Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) try: betas", "RMSDReduction \"\"\" Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) try: betas = self.obtainLstSqBetasGeneralized2(Marray,", "reduced RMSD defvec: the deformation vector from proteinFrom to proteinTo referenceName: the name", "the indices in excludeFirstK, and the following modes as given by the indices", "excludeFirstK, indicesOfHighest) defvec = calcDeformVector(ref_chain, mob_chain) RMSDReductions = [] overlap = [] Mtrans", "OrderedDict class RMSDReducer(object): ''' The RMSDReducer contains method to reduce the RMSD between", "TimeoutError: print \"RMSD timeout at modes\", i,\"using previous betas\" with 
open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as", "highestN, excludeFirstK) M = self.getModeArrayBasedOnIndices(anm_slc[0], excludeFirstK, indicesOfHighest) defvec = calcDeformVector(ref_chain, mob_chain) RMSDReductions =", "return initialGuess def calcRMSDReductionsAidedByCollectivity(self, collectivity, highestN, excludeFirstK, anm_slc, ref_chain, mob_chain): indicesOfHighest = self.utils.getIndiciesofHighestN(np.abs(collectivity),", "preconceived: has guard from config been reached or not Returns: the beta coefficents", "TimeoutError: print \"RMSD timeout at modes\", Marray.shape[1],\" using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\")", "self.obtainLstSqBetasGeneralizedExpanding(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes) except TimeoutError: print \"RMSD timeout at modes\",", "MTMfull: dot product of the full ANM matrix inverse times # the ANM", "proteinTo) else: previousRMSD = previousRMSD[-1] try: betas = self.obtainLstSqBetasGeneralized2(Marray, defvec, MTM) except TimeoutError:", "M = np.zeros((len(Mbefore), 1)) #print \"M: \", M for i in range(0, len(Mbefore)):", "status elif not preconceived: initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes) betas, status = cg(MTM,", "problem\" currentOverlap = 0 overlap.append(currentOverlap) if L_RMSReductions: if L_RMSD_after_Tapprox < L_RMSReductions[-1]: L_RMSReductions.append(L_RMSD_after_Tapprox) else:", "nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"status == \", status, \" skipped\" return initialGuess", "timeout at modes\", i, \"using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\"", "from timeout import TimeoutError from collections import OrderedDict class RMSDReducer(object): ''' The RMSDReducer", "output debugging 
purposes filePrefix: file prefix, for output debugging purposes Returns: RMSDReductions: The", "return betas @timeout() def obtainLstSqBetasGeneralized2(self, M, defvec, MTM, previousBetas=None): \"\"\" Obtain betas by", "same shape as getArray from an ANM object ref_chain: The overall matched chain", "<NAME>., and <NAME>. \"SwarmDock and the Use of Normal # Modes in Protein-Protein", "range(len(initialGuess), modesToConsider+1)]) if len(initialGuess) > maxModesOverall: initialGuess = initialGuess[:maxModesOverall] return initialGuess def getInitialGuess(self,", "= defvec.getArray() # #print \"shape(Tdefvec): \", np.shape(Tdefvec) # #print \"shape(M): \", np.shape(M) #", "output debugging purposes Returns: RMSDReductions: The reduction list of obtained RMSD values \"\"\"", "and <NAME>. \"SwarmDock and the Use of Normal # Modes in Protein-Protein Docking.\"", "is being deformed towards previousBetas: The previous betas, serves as part of the", "defvec: the deformation vector stepPointsReduction: list of number of modes to successively calculate", "# print \"already reached RMSD = 1 at i:\", i # raw_input() #", "and proteinTo \"\"\" if investigationsOn == \"Complex\": proteinFromL = proteinFrom.select('segment \\\"L.\\\"') proteinToL =", "from proteinFrom to proteinTo referenceName: the name of the reference, for output debugging", "calculated on this structure nonTrivialModes = maxModes #(maxModes[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuessExpanding(listofPreviousBetas,", "\"Individual\" Returns: L_RMS of proteinFrom and proteinTo \"\"\" if investigationsOn == \"Complex\": proteinFromL", "print \"previous RMSD lower at \", i # else the previous RMSD was", "the ANM with modes defvec: the deformationvector MTMfull: dot product of the full", "nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"det(MTM) == 0, skipped\" return initialGuess betas, status", "proteinTo) # RMSD comparison if np.isnan(RMSD_after_Tapprox) or 
np.isinf(RMSD_after_Tapprox): print \"RMSD_after_Tapprox has a numerical", "a copy of the bound structure and get the reduced RMSD mob_chain_copy =", "Sciences 11, no. 10 (September 28, 2010): 3623-3648. doi:10.3390/ijms11103623. Args: M: the modes", "too close or the mode vectors are problematic, returning overlap 0\" currentOverlap =", "# calculate betas try: betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole, anm_slc, preconceived=True)", "betas, serves as part of the initial guess for the fitter previousOverlap: The", "return betas def getInitialGuessExpanding(self, listofPreviousBetas, modesToConsider, maxModesOverall): \"\"\" Create an initial guess vector,", "MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM maximalIter = self.utils.config.maxIterBetas if modesToConsider < 1: #print", "not lower L_RMS\" # cast objects overlap = np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions,", "previousRMSD[-1] try: betas = self.obtainLstSqBetasGeneralized2(Marray, defvec, MTM) except TimeoutError: print \"RMSD timeout at", "dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsExpandingSet(self, Marray, ref_chain,", "defvec): \"\"\" Calculate a list of RMSD reductions based increasing number of modes,", "else: print \"previous RMSD lower at \", i # else the previous RMSD", "old # ### M = anm.getArray() # # Tdefvec = defvec.getArray() # #print", "np.isinf(currentOverlap): print \"overlap has a numerical problem, maybe the two structures are already", "first k: \", M for i in range(1, len(k)): M = np.dstack((M, np.dstack(arrCopy)[0][i]))", "many modes the betas should be calculated # # Returns: # the beta", "increasing number of modes, that are combined in a linear combination with betas.", "values \"\"\" RMSDReductions = [] L_RMSReductions = [] overlap = [] numModes =", "compare against previous 
RMSD # store betas and RMSD reduction results betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox)", "anm.getArray() Mtrans = M.T MTM = np.dot(Mtrans, M) return MTM def calcRMSDReductions(self, anm_slc,", "modes could be calculated on this structure # nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) - 6", "betas by a scipy optimizer fitting, the formula is given in : #", "calculate betas try: betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole, anm_slc) except TimeoutError:", "0 overlap.append(currentOverlap) if L_RMSReductions: if L_RMSD_after_Tapprox < L_RMSReductions[-1]: L_RMSReductions.append(L_RMSD_after_Tapprox) else: print \"previous L_RMS", "beta calculation was not successful betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) # cast objects overlap =", "getModeArrayBasedOnIndices(self, anm_slc, excludeFirstK, indicesOfHighest): \"\"\" Create an array of np.arrays with the modes", "MTM = np.dot(Mtrans, Marray) betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, numModes, initialStep=1)", "calculated listofPreviousBetas: the list of previously calculated betas anmTuple: anm tuple as generated", "to speedup other calculations continue # calculate betas try: betas = self.obtainLstSqBetasGeneralizedExpanding(Marray.T[0:i+1].T, defvec,", "np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsReverseGeneral(self, Marray,", "Returns: RMSDReductions: The reduction list of obtained RMSD values \"\"\" #print \"Marray: \",", "RMSD fitter timeouts Returns: RMSDReduction, overlap, betas \"\"\" Mtrans = Marray.T MTM =", "the RMSD reduction should go preconceived # calculate betas Mmode = self.getModeArrayKeepingFirstK(M, i)", "bound structure and get the reduced RMSD mob_chain_copy = mob_chain.copy() 
mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3())", "Tapprox to a copy of proteinFrom and get the RMSD towards proteinTo proteinFrom_copy", "vector) investigationsON: \"Complex\" or \"Individual\" Returns: L_RMS of proteinFrom and proteinTo \"\"\" if", "# maximalIter = self.utils.config.maxIterBetas # # if modesToConsider < 1: # print \"using", "np.isnan(currentOverlap) or np.isinf(currentOverlap): print \"overlap has a numerical problem, maybe the two structures", "guard < self.utils.config.guard: # calculate betas try: betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i,", "# else the previous LRMS was actually lower, the beta calculation was not", "numModes = Marray.shape[1] #MTM = self.setupMTMforBetas(anm_slc[0]) Mtrans = Marray.T MTM = np.dot(Mtrans, Marray)", "= np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) L_RMSReductions = np.array(L_RMSReductions, dtype=np.float64) deformationSnapshots[\"proteinTo\"] =", "11, no. 10 (September 28, 2010): 3623-3648. # doi:10.3390/ijms11103623. # # Args: #", "structures are already too close or the mode vectors are problematic\" RMSD_after_Tapprox =", "\", np.shape(M) if len(M) != len(Tdefvec): raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\")", "== 1: Mbefore = np.array(np.dstack(arrCopy)[0][0]) M = np.zeros((len(Mbefore), 1)) #print \"M: \", M", "28, 2010): 3623-3648. # doi:10.3390/ijms11103623. 
# # Args: # anm: the ANM with", "self.utils.config.guard, and the RMSD reduction should go preconceived # calculate betas Mmode =", "with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at modes \" +str(i)+\" using", "the first RMSD reduction run, store betas and RMSD reduction results initial_RMSD =", "the betas should be calculated, starting from 0 to n-1 listofPreviousBetas: the list", "len(Mbefore)): M[i] = Mbefore[i] return M elif len(arr[0]) == len(k): return arr else:", "len(Tdefvec): raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") Mtrans = M.T MTM =", "+ TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]: #", "modes to successively calculate the RMSD reductions on referenceName: the name of the", "correct length. Args: listofPreviousBetas: the list of previously calculated Betas modesToConsider: up to", "except TimeoutError: print \"RMSD timeout at modes\", i,\"using previous betas\" # with open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\",", "= anm.getArray() #print \"first M original: \", M Tdefvec = defvec.getArray() #print \"shape(Tdefvec):", "numerical problem, maybe the two structures are already too close or the mode", "referenceName: the name of the reference, for output debugging purposes filePrefix: file prefix,", "\"shape(M): \", np.shape(M) # if len(M) != len(Tdefvec): # raise ValueError(\"Cannot calculate betas,", "object ref_chain: The overall matched chain atoms from the unbound structure mob_chain: The", "other calculations continue if guard < self.utils.config.guard: # calculate betas ## new Mmode", "\"overlap has a numerical problem\" currentOverlap = 0 overlap.append(currentOverlap) else: # else guard", "values \"\"\" RMSDReductions = [] overlap = [] MTM = self.setupMTMforBetas(anm_slc[0]) betasListWhole =", 
"structure nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) if modesToConsider >", "i, betasListWhole, anm_slc, preconceived=True) except TimeoutError: print \"RMSD timeout at modes\", i, \"using", "3623-3648. doi:10.3390/ijms11103623. Args: anm: the ANM with modes defvec: the deformationvector MTMfull: dot", "modesToConsider): \"\"\" Create an initial guess vector, padded with 0.0 values to the", "pre-calculated MTM maximalIter = self.utils.config.maxIterBetas if modesToConsider < 1: #print \"original MTM, np.dot(Mtrans,", "if the RMSD fitter timeouts Returns: RMSDReduction \"\"\" Mtrans = Marray.T MTM =", "a copy of proteinFrom and get the RMSD towards proteinTo proteinFrom_copy = proteinFrom.copy()", "# with open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: # myfile.write(referenceName+\" RMSD timeout at modes \"", "Target protein (target of the deformation vector) investigationsON: \"Complex\" or \"Individual\" Returns: L_RMS", "defvec, MTM, i, betasListWhole, numModes, preconceived=True) except TimeoutError: print \"RMSD timeout at modes\",", "Molecular Sciences 11, no. 10 (September 28, 2010): 3623-3648. # doi:10.3390/ijms11103623. # #", "the bound structure defvec: the deformation vector Returns: RMSDReductions: The reduction list of", "Protein-Protein Docking.\" International Journal of # Molecular Sciences 11, no. 10 (September 28,", "= mob_chain_copy.copy() if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]: # store betas and RMSD", "28, 2010): 3623-3648. doi:10.3390/ijms11103623. 
Args: M: the modes array defvec: the deformation vector", "Marray) if len(previousBetas) == 0: previousBetas = [0] else: previousBetas = previousBetas[-1] if", "then [][] Mmode = self.getModeArrayKeepingFirstK(M, i) print \"Mmode: \", np.shape(Mmode) betas = self.obtainLstSqBetasByCollectivity(Mmode,", "= self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes, preconceived=True) except TimeoutError: print \"RMSD timeout", "or i > numModes: # temporary, to speedup other calculations continue # calculate", "betas, status = lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2] print \"modesToConsider, status:", "listofPreviousBetas[-1] initialGuess = np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider+1)]) if len(initialGuess) >", "RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) # cast objects overlap = np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64)", "as given by the indices in indicesOfHighest\"\"\" excludeFirstK = range(0, excludeFirstK) M =", "print \"RMSD timeout at modes\", i,\"using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile:", "to get the betas Returns: The initial guess vector for the betas, padded", "calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or np.isinf(currentOverlap): print \"overlap has", "successful L_RMSReductions.append(L_RMSReductions[-1]) else: # else it is the first L_RMSD reduction run, store", "# print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"status == \", status, \"", "preconceived # calculate betas try: betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole, anm_slc,", "defvec: the deformationvector # 
MTMfull: dot product of the full ANM matrix inverse", "vector for the betas, padded with 0.0 to reach the correct length \"\"\"", "defvec, referenceName, filePrefix): \"\"\" One shot calculation for the RMSD reduction. Args: Marray:", "== 0.0 or np.linalg.det(MTM) == -0.0: # print \"modesToConsider, nonTrivialModes, status: \", modesToConsider,", "x in range(len(initialGuess), modesToConsider)]) return initialGuess def calcRMSDReductionsAidedByCollectivity(self, collectivity, highestN, excludeFirstK, anm_slc, ref_chain,", "deformation vector referenceName: the name of the reference Returns: RMSDReductions: The reduction list", "RMSD\" RMSD_after_Tapprox = calcRMSD(proteinFrom, proteinTo) # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if", "previous reduced RMSD defvec: the deformation vector from proteinFrom to proteinTo referenceName: the", "Journal of Molecular Sciences 11, no. 10 (September 28, 2010): 3623-3648. doi:10.3390/ijms11103623. Args:", "except TimeoutError: print \"RMSD timeout at modes\", i,\"using previous betas\" with open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\")", "print \"overlap has a numerical problem\" currentOverlap = 0 overlap.append(currentOverlap) if L_RMSReductions: if", "are combined in a linear combination with betas. 
RMSD change from mob_chain to", "= previousOverlap else: currentOverlap = 0 return RMSD_after_Tapprox, currentOverlap, betas def RMSDReductionFixedset(self, Marray,", "k: \", M for i in range(1, len(k)): M = np.dstack((M, np.dstack(arrCopy)[0][i])) #print", "from the bound structure defvec: the deformation vector referenceName: the name of the", "RMSD values \"\"\" #print \"Marray: \", Marray[0:2] RMSDReductions = [] overlap = []", "calcRMSD(proteinFrom, proteinTo) else: previousRMSD = previousRMSD[-1] try: betas = self.obtainLstSqBetasGeneralized2(Marray, defvec, MTM) except", "Tdefvec), maxiter=maximalIter)[0:2] # elif not preconceived: # initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) # betas,", "= cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] print \"modesToConsider, status: \", modesToConsider, status elif not", "def obtainLstSqBetasGeneralized2(self, M, defvec, MTM, previousBetas=None): \"\"\" Obtain betas by a scipy optimizer", "= np.zeros((len(Mbefore), 1)) #print \"M: \", M for i in range(0, len(Mbefore)): M[i]", "overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap) else: print \"previous RMSD lower at \",", "using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at modes", "skipped\" # return initialGuess # betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter,", "with the corresponding entries of the eigenvectors towards the matched atoms ref_chain: The", "of the unbound structure and get the reduced RMSD ref_chain_copy = ref_chain.copy() ref_chain_copy.setCoords(ref_chain_copy.getCoords()", "RMSD reduction run, store betas and RMSD reduction results initial_RMSD = calcRMSD(mob_chain, ref_chain)", "many modes could be calculated on this structure nonTrivialModes = maxModes #(maxModes[1].select('calpha').numAtoms()*3) -", "''' def 
__init__(self, utils): ''' Constructor ''' self.utils = utils def setupMTMforBetas(self, anm):", "# Mtrans = M.T # MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM #", "def expandInitialGuess(self, listofPreviousBetas, modesToConsider): \"\"\" Create an initial guess vector, padded with 0.0", "The overall matched chains of the protein to deform towards proteinTo proteinTo: The", "calculate betas try: betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes) except TimeoutError:", "return betas @timeout() def obtainLstSqBetasGeneral(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, maxModes, preconceived=False): \"\"\"", "= np.dstack((M, anm_slc[indicesOfHighest[j]].getArray())) # print \"highe \",j,\" M: \", M return M[0] def", "2014 @author: oliwa ''' from prody.measure.measure import calcDeformVector import numpy as np from", "self.utils.config.guard, and the RMSD reduction should go preconceived # calculate betas try: betas", "indicesOfHighest\"\"\" excludeFirstK = range(0, excludeFirstK) M = anm_slc[excludeFirstK[0]].getArray() #print \"initial M: \", M", "based increasing number of modes, that are combined in a linear combination with", "M for i in range(1, len(k)): M = np.dstack((M, np.dstack(arrCopy)[0][i])) #print \"M in", "of obtained RMSD values \"\"\" RMSDReductions = [] L_RMSReductions = [] overlap =", "else: # else it is the first RMSD reduction run, no need to", "def obtainLstSqBetasByCollectivity(self, M, defvec, MTMfull, modesToConsider, listofPreviousBetas, anmTuple, preconceived=False): # \"\"\" Obtain betas", "stepPointsReduction def calcRMSDReductionsExpandingSet(self, Marray, ref_chain, mob_chain, defvec, stepPointsReduction, referenceName, filePrefix): \"\"\" Calculate a", "lower RMSD\" betasListWhole.append(betas) # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or", "and <NAME>. 
\"SwarmDock and the Use of Normal Modes in Protein-Protein Docking.\" International", "Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # print \"modesToConsider, status: \", modesToConsider, status # else:", "up to how many modes are given to get the betas Returns: The", "L_RMSReductions.append(L_RMSD_after_Tapprox) else: print \"previous L_RMS lower at \", i # else the previous", "10 (September 28, 2010): 3623-3648. # doi:10.3390/ijms11103623. # # Args: # anm: the", "np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox): print \"RMSD_after_Tapprox has a numerical problem, maybe the two structures", "Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2] print \"modesToConsider, status: \", modesToConsider, status else: #", "chain atoms from the bound structure defvec: the deformation vector stepPointsReduction: list of", "len(M) != len(Tdefvec): # raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") # Mtrans", "Create an array of np.arrays with the modes specified by the indices in", "problem\" if overlap: overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap) guard = 0 else:", "betas. 
RMSD change from mob_chain to ref_chain Args: anm_slc: The sliced ANM, with", "= self.getL_RMS(mob_chain, ref_chain, self.utils.config.investigationsOn) if L_RMSD_after_Tapprox < initial_L_RMS: L_RMSReductions.append(L_RMSD_after_Tapprox) else: L_RMSReductions.append(initial_L_RMS) print \"first", "currentOverlap = 0 overlap.append(currentOverlap) guard = 0 else: print \"previous RMSD lower at", "excludeFirstK = range(0, excludeFirstK) M = anm_slc[excludeFirstK[0]].getArray() #print \"initial M: \", M for", "print \"using one column\" # betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] #", "initialGuess # betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # if", "RMSD reduction should go preconceived # calculate betas try: betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec,", "# else guard is >= self.utils.config.guard, and the RMSD reduction should go preconceived", "else: # else it is an investigation on individual proteins, L_RMS does not", "of normal modes, same shape as getArray from an ANM object proteinFrom: The", "listofPreviousBetas[-1] initialGuess = np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider+1)]) return initialGuess def", "proteinFrom, proteinTo, defvec, referenceName, filePrefix): \"\"\" One shot calculation for the RMSD reduction.", "with betas. 
RMSD change from mob_chain to ref_chain Args: anm_slc: The sliced ANM,", "use pre-calculated MTM maximalIter = self.utils.config.maxIterBetas if modesToConsider < 1: #print \"original MTM,", "M = np.dstack((M, np.dstack(arrCopy)[0][i])) #print \"M in keep first \"+str(i)+\": \", M return", "deformationSnapshots[\"proteinTo\"] = ref_chain.copy() return RMSDReductions, overlap, stepPointsReduction, L_RMSReductions, deformationSnapshots def getL_RMS(self, proteinFrom, proteinTo,", "previous betas\\n \") betas = self.getInitialGuess(betasListWhole, i) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector =", "< RMSDReductions[-1]: # store betas and RMSD reduction results betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox) # calc", "anm_slc and then [][] Mmode = self.getModeArrayKeepingFirstK(M, i) print \"Mmode: \", np.shape(Mmode) betas", "k): k += 1 k = range(0, k) arrCopy = arr.copy() if len(k)", "anm_slc, ref_chain, mob_chain, defvec): \"\"\" Calculate a list of RMSD reductions based increasing", "the index match the range 0 to n-1 print stepPointsReduction betasListWhole = [[0]", "np.isinf(currentOverlap): print \"overlap has a numerical problem\" currentOverlap = 0 overlap.append(currentOverlap) else: #", "the deformation vector from proteinFrom to proteinTo referenceName: the name of the reference,", "# modesToConsider: up to how many modes the betas should be calculated #", "in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if i > self.utils.config.stopRMSDReductionAt: # temporary, to speedup other", "was not successful guard += 1 betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else: # else it", "the betas should be calculated listofPreviousBetas: the list of previously calculated betas anmTuple:", "the mode vectors are problematic\" RMSD_after_Tapprox = previousRMSD # calc overlap currentOverlap =", "3623-3648. # doi:10.3390/ijms11103623. 
# # Args: # anm: the ANM with modes #", "Args: anm_slc: The sliced ANM, with the corresponding entries of the eigenvectors towards", "or np.isinf(currentOverlap): print \"overlap has a numerical problem\" currentOverlap = 0 overlap.append(currentOverlap) else:", "protein proteinTo: Target protein (target of the deformation vector) investigationsON: \"Complex\" or \"Individual\"", "using previous betas\\n \") betas = self.getInitialGuess(previousBetas, Marray.shape[1]) Tapprox = np.dot(betas, Marray.T) TapproxVector", "\"len(M): \", M.shape print \"len(Tdefvec): \", len(Tdefvec) raise ValueError(\"Cannot calculate betas, len(M) !=", "\"\"\" # ### old # ### M = anm.getArray() # # Tdefvec =", "0.0 to reach the correct length \"\"\" initialGuess = listofPreviousBetas initialGuess = np.append(initialGuess,", "np.dot(betas[0:i+1], Marray.T[0:i+1]) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to a copy of", "ref_chain) L_RMSD_after_Tapprox = self.getL_RMS(mob_chain_copy, ref_chain, self.utils.config.investigationsOn) deformationSnapshots[i] = mob_chain_copy.copy() if RMSDReductions: if RMSD_after_Tapprox", "self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes) except TimeoutError: print \"RMSD timeout at modes\",", "try: betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes) except TimeoutError: print \"RMSD", "TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(proteinFrom_copy, proteinTo) # RMSD comparison if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox): print", "MTM, previousBetas=None): \"\"\" Obtain betas by a scipy optimizer fitting, the formula is", "ref_chain: The overall matched chain atoms from the unbound structure mob_chain: The overall", "RMSDReductions = [] L_RMSReductions = [] overlap = [] numModes = Marray.shape[1] Mtrans", "the deformationvector # MTMfull: dot product of the full ANM matrix inverse times", "overlap, stepPointsReduction def 
calcRMSDReductionsExpandingSet(self, Marray, ref_chain, mob_chain, defvec, stepPointsReduction, referenceName, filePrefix): \"\"\" Calculate", "else: # how many modes could be calculated on this structure nonTrivialModes =", "number maximalIter = self.utils.config.maxIterBetas if M.shape[1] == 1: betas, status = cg(MTM, np.dot(Mtrans,", "\"using previous betas\" with open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at modes", "on referenceName: the name of the reference, for output debugging purposes filePrefix: file", "print \"first mode did not lower L_RMS\" # cast objects overlap = np.array(overlap,", "else: # else the previous RMSD was actually lower, the beta calculation was", "ANM matrix modesToConsider: up to how many modes the betas should be calculated", "should go preconceived # calculate betas try: betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i,", "print \"modesToConsider, status: \", modesToConsider, status return betas @timeout() def obtainLstSqBetasGeneral(self, anm, defvec,", "International Journal of # Molecular Sciences 11, no. 
10 (September 28, 2010): 3623-3648.", "[[0] * stepPointsReduction[0]] deformationSnapshots = OrderedDict() deformationSnapshots[\"proteinFrom\"] = mob_chain.copy() for i in stepPointsReduction:", "<gh_stars>1-10 ''' Created on Jan 24, 2014 @author: oliwa ''' from prody.measure.measure import", "preconceived=True) except TimeoutError: print \"RMSD timeout at modes\", i, \"using previous betas\" #", "stepPointsReduction = stepPointsReduction - 1 # reduce every value by one to have", "currentOverlap = 0 overlap.append(currentOverlap) if L_RMSReductions: if L_RMSD_after_Tapprox < L_RMSReductions[-1]: L_RMSReductions.append(L_RMSD_after_Tapprox) else: print", "ref_chain_copy.setCoords(ref_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]): # store betas", "for output debugging if the RMSD fitter timeouts filePrefix: filePrefix, for output debugging", "matched chain atoms from the bound structure defvec: the deformation vector referenceName: the", "are already too close or the mode vectors are problematic, returning overlap 0\"", "not successful betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) # cast objects overlap = np.array(overlap, dtype=np.float64) RMSDReductions", "RMSD change from mob_chain to ref_chain Args: Marray: Array of normal modes, same", "defvec) if np.isnan(currentOverlap) or np.isinf(currentOverlap): print \"overlap has a numerical problem, maybe the", "Mbefore = np.array(np.dstack(arrCopy)[0][0]) M = np.zeros((len(Mbefore), 1)) #print \"M: \", M for i", "defvec, MTM, i, betasListWhole, anm_slc, preconceived=True) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox,", "# apply Tapprox to a copy of the bound structure and get the", "if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]: # store betas and RMSD 
reduction results", "obtained RMSD values \"\"\" RMSDReductions = [] overlap = [] MTM = self.setupMTMforBetas(anm_slc[0])", "modesToConsider: up to how many modes the betas should be calculated, starting from", "proteins, L_RMS does not apply, # return RMSD of individual proteins instead return", "tol=self.utils.config.precisionBetaFitting)[0:2] # betas, status = lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2] print", "else: previousRMSD = previousRMSD[-1] try: betas = self.obtainLstSqBetasGeneralized2(Marray, defvec, MTM) except TimeoutError: print", "no. 10 (September 28, 2010): 3623-3648. doi:10.3390/ijms11103623. Args: M: the modes array defvec:", "= [] L_RMSReductions = [] overlap = [] numModes = Marray.shape[1] Mtrans =", "not preconceived: initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec),", "= utils def setupMTMforBetas(self, anm): \"\"\" Calculate and return the dot product of", "modesToConsider)]) return initialGuess def calcRMSDReductionsAidedByCollectivity(self, collectivity, highestN, excludeFirstK, anm_slc, ref_chain, mob_chain): indicesOfHighest =", "already too close or the mode vectors are problematic, returning original RMSD\" RMSD_after_Tapprox", "betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, numModes, initialStep=1) print \"stepPointsReduction: \", stepPointsReduction", "entries of the eigenvectors towards the matched atoms ref_chain: The overall matched chain", "purposes filePrefix: file prefix, for output debugging purposes Returns: RMSDReductions: The reduction list", "not preconceived: # initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) # betas, status = cg(MTM, np.dot(Mtrans,", "are problematic, returning overlap 0\" currentOverlap = 0 return RMSD_after_Tapprox, 
currentOverlap, betas @timeout()", "initialGuess = np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider+1)]) return initialGuess def expandInitialGuess(self,", "getInitialGuess(self, listofPreviousBetas, modesToConsider): \"\"\" Create an initial guess vector, padded with 0.0 values", "if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox): print \"RMSD_after_Tapprox has a numerical problem, maybe the two", "the unbound structure and get the reduced RMSD ref_chain_copy = ref_chain.copy() ref_chain_copy.setCoords(ref_chain_copy.getCoords() +", "objects overlap = np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) L_RMSReductions = np.array(L_RMSReductions, dtype=np.float64)", "no. 10 (September 28, 2010): 3623-3648. doi:10.3390/ijms11103623. Args: anm: the ANM with modes", "obtainLstSqBetasGeneralizedExpanding(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, maxModes, preconceived=False): \"\"\" Obtain betas by a", "reached or not Returns: the beta coefficents \"\"\" M = anm Tdefvec =", "to a copy of proteinFrom and get the RMSD towards proteinTo proteinFrom_copy =", "L_RMS\" # cast objects overlap = np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) L_RMSReductions", "results betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox) # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or", "defvec) if np.isnan(currentOverlap) or np.isinf(currentOverlap): print \"overlap has a numerical problem\" currentOverlap =", "@author: oliwa ''' from prody.measure.measure import calcDeformVector import numpy as np from prody.dynamics.compare", "proteinTo: Target protein (target of the deformation vector) investigationsON: \"Complex\" or \"Individual\" Returns:", "status != 0: print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"status == \",", 
"initial_RMSD: RMSDReductions.append(RMSD_after_Tapprox) else: RMSDReductions.append(initial_RMSD) print \"first mode did not lower RMSD\" betasListWhole.append(betas) #", "try: betas = self.obtainLstSqBetasGeneralized2(Marray, defvec, MTM) except TimeoutError: print \"RMSD timeout at modes\",", "are given to get the betas Returns: The initial guess vector for the", "status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] else: if previousBetas is not None: initialGuess", "self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes, preconceived=True) except TimeoutError: print \"RMSD timeout at", "i > self.utils.config.stopRMSDReductionAt: # temporary, to speedup other calculations continue if guard <", "MTMfull, modesToConsider, listofPreviousBetas, maxModes, preconceived=False): \"\"\" Obtain betas by a scipy optimizer fitting,", "initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) # betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter,", "utils): ''' Constructor ''' self.utils = utils def setupMTMforBetas(self, anm): \"\"\" Calculate and", "modes, that are combined in a linear combination with betas. 
RMSD change from", "\", np.shape(Tdefvec) #print \"shape(M): \", np.shape(M) if len(M) != len(Tdefvec): raise ValueError(\"Cannot calculate", "self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i, betasListWhole, anm_slc, preconceived=True) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector =", "self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]): # store betas and RMSD reduction results betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox) # calc", "= self.getModeArrayBasedOnIndices(anm_slc[0], excludeFirstK, indicesOfHighest) defvec = calcDeformVector(ref_chain, mob_chain) RMSDReductions = [] overlap =", "else: # else it is the first L_RMSD reduction run, store L_RMS reduction", "\", len(Tdefvec) raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") Mtrans = M.T MTM", "to n-1 print stepPointsReduction betasListWhole = [[0] * stepPointsReduction[0]] deformationSnapshots = OrderedDict() deformationSnapshots[\"proteinFrom\"]", "proteinTo proteinTo: The overall matched chains of the protein which is being deformed", "matched chains of the protein which is being deformed towards previousBetas: The previous", "# betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # if status", "in range(0, len(Mbefore)): M[i] = Mbefore[i] return M elif len(arr[0]) == len(k): return", "timeout at modes\", i,\"using previous betas\" # with open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: #", "part of the initial guess for the fitter previousOverlap: The previous overlap previousRMSD:", "#print \"first M original: \", M Tdefvec = defvec.getArray() #print \"shape(Tdefvec): \", np.shape(Tdefvec)", "0, skipped\" return initialGuess betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2]", "range(0, excludeFirstK) M 
= anm_slc[excludeFirstK[0]].getArray() #print \"initial M: \", M for i in", "\"previous RMSD lower at \", i # else the previous RMSD was actually", "skipped\" return initialGuess betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] #", "i) print \"Mmode: \", np.shape(Mmode) betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i, betasListWhole, anm_slc)", "= self.getInitialGuess(listofPreviousBetas, modesToConsider) # if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): # if np.linalg.det(MTM) == 0.0", "status: \", modesToConsider, status return betas @timeout() def obtainLstSqBetasGeneralizedExpanding(self, anm, defvec, MTMfull, modesToConsider,", "MTM maximalIter = self.utils.config.maxIterBetas if modesToConsider < 1: #print \"original MTM, np.dot(Mtrans, Tdefvec)", "def calcRMSDReductionsReverseGeneral(self, Marray, ref_chain, mob_chain, defvec, referenceName, filePrefix): \"\"\" Calculate a list of", "RMSD towards proteinTo proteinFrom_copy = proteinFrom.copy() proteinFrom_copy.setCoords(proteinFrom_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(proteinFrom_copy, proteinTo)", "L_RMS of proteinFrom and proteinTo \"\"\" if investigationsOn == \"Complex\": proteinFromL = proteinFrom.select('segment", "L_RMSD_after_Tapprox = self.getL_RMS(mob_chain_copy, ref_chain, self.utils.config.investigationsOn) deformationSnapshots[i] = mob_chain_copy.copy() if RMSDReductions: if RMSD_after_Tapprox <", "= Marray.T MTM = np.dot(Mtrans, Marray) betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10,", "Rreduction of 1.0 # betasListWhole.append(betasListWhole[-1]) # RMSDReductions.append(RMSDReductions[-1]) # overlap.append(overlap[-1]) # print \"already reached", "self.getModeArrayKeepingFirstK(M, i) betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i, betasListWhole, anm_slc, 
preconceived=True) Tapprox =", "= np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to a copy", "M, defvec, MTM, previousBetas=None): \"\"\" Obtain betas by a scipy optimizer fitting, the", "= Mbefore[i] return M elif len(arr[0]) == len(k): return arr else: M =", "modesToConsider, nonTrivialModes, \"det(MTM) == 0, skipped\" # return initialGuess # betas, status =", "reduced RMSD mob_chain_copy = mob_chain.copy() mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) if", "0 overlap.append(currentOverlap) guard = 0 else: print \"previous RMSD lower at \", i", "initialGuess betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # betas, status", "RMSDReductions: The reduction list of obtained RMSD values \"\"\" RMSDReductions = [] overlap", "= self.setupMTMforBetas(anm_slc[0]) Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) betasListWhole = [] stepPointsReduction", "print \"overlap has a numerical problem\" if overlap: overlap.append(overlap[-1]) else: currentOverlap = 0", "previousBetas, previousOverlap, previousRMSD, referenceName, filePrefix): \"\"\" Calculate a list of RMSD reductions based", "how many modes are given to get the betas Returns: The initial guess", "reduction results initial_L_RMS = self.getL_RMS(mob_chain, ref_chain, self.utils.config.investigationsOn) if L_RMSD_after_Tapprox < initial_L_RMS: L_RMSReductions.append(L_RMSD_after_Tapprox) else:", "Args: anm: the ANM with modes defvec: the deformationvector MTMfull: dot product of", "else: # # how many modes could be calculated on this structure #", "initialGuess = listofPreviousBetas[-1] initialGuess = np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider+1)]) if", "stepPointsReduction def getModeArrayBasedOnIndices(self, anm_slc, excludeFirstK, 
indicesOfHighest): \"\"\" Create an array of np.arrays with", "= np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def getModeArrayBasedOnIndices(self, anm_slc, excludeFirstK, indicesOfHighest): \"\"\"", "= previousBetas[-1] if len(previousOverlap) == 0: previousOverlap = 0 else: previousOverlap = previousOverlap[-1]", "of the reference Returns: RMSDReductions: The reduction list of obtained RMSD values \"\"\"", "= [] overlap = [] numModes = Marray.shape[1] Mtrans = Marray.T MTM =", "\"RMSD timeout at modes\", i,\"using previous betas\" with open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\"", "as part of the initial guess for the fitter previousOverlap: The previous overlap", "# MTMfull: dot product of the full ANM matrix inverse times # the", "= self.obtainLstSqBetasGeneralized2(Marray, defvec, MTM) except TimeoutError: print \"RMSD timeout at modes\", Marray.shape[1],\" using", "> maxModesOverall: initialGuess = initialGuess[:maxModesOverall] return initialGuess def getInitialGuess(self, listofPreviousBetas, modesToConsider): \"\"\" Create", "between proteins. 
''' def __init__(self, utils): ''' Constructor ''' self.utils = utils def", "for x in range(len(initialGuess), modesToConsider+1)]) if len(initialGuess) > maxModesOverall: initialGuess = initialGuess[:maxModesOverall] return", "Returns: The initial guess vector for the betas, padded with 0.0 to reach", "== 1: betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] else: if previousBetas is", "if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox) or previousRMSD < RMSD_after_Tapprox: print \"RMSD_after_Tapprox has a numerical", "RMSD Rreduction of 1.0 # betasListWhole.append(betasListWhole[-1]) # RMSDReductions.append(RMSDReductions[-1]) # overlap.append(overlap[-1]) # print \"already", "mode did not lower L_RMS\" # cast objects overlap = np.array(overlap, dtype=np.float64) RMSDReductions", "def __init__(self, utils): ''' Constructor ''' self.utils = utils def setupMTMforBetas(self, anm): \"\"\"", "i # else the previous RMSD was actually lower, the beta calculation was", "actually lower, the beta calculation was not successful L_RMSReductions.append(L_RMSReductions[-1]) else: # else it", "TimeoutError: print \"RMSD timeout at modes\", i,\"using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as", "the list of previously calculated Betas modesToConsider: up to how many modes are", "btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2] if status != 0: print \"modesToConsider, nonTrivialModes, status: \", modesToConsider,", "the reduced RMSD mob_chain_copy = mob_chain.copy() mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain)", "maxModesOverall: initialGuess = initialGuess[:maxModesOverall] return initialGuess def getInitialGuess(self, listofPreviousBetas, modesToConsider): \"\"\" Create an", "maybe the two structures are already too close or the mode 
vectors are", "MTMfull, modesToConsider, listofPreviousBetas, anmTuple, preconceived=False): \"\"\" Obtain betas by a scipy optimizer fitting,", "len(arr[0]) == len(k): return arr else: M = np.dstack(arrCopy)[0][0] #print \"first M in", "= [] MTM = self.setupMTMforBetas(anm_slc[0]) betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, anm_slc[0].numModes())", "\"modesToConsider, status: \", modesToConsider, status return betas @timeout() def obtainLstSqBetasGeneralized2(self, M, defvec, MTM,", "the previous RMSD was actually lower, the beta calculation was not successful betasListWhole.append(betasListWhole[-1])", "\"status == \", status, \" skipped\" return initialGuess print \"modesToConsider, status: \", modesToConsider,", "\", modesToConsider, nonTrivialModes, \"det(MTM) == 0, skipped\" # return initialGuess # betas, status", "anm_slc[0][0:2].getArray().shape RMSDReductions = [] overlap = [] MTM = self.setupMTMforBetas(anm_slc[0]) betasListWhole = []", "modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): # if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) == -0.0: #", "status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # betas, status = lsmr(MTM,", "calculate betas, len(M) != len(Tdefvec)\") Mtrans = M.T MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use", "RMSD values \"\"\" RMSDReductions = [] L_RMSReductions = [] overlap = [] numModes", "the deformation vector stepPointsReduction: list of number of modes to successively calculate the", "= calcRMSD(proteinFrom_copy, proteinTo) # RMSD comparison if previousRMSD: if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox) or", "6 # initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) # if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): # if", "at modes\", i,\"using previous betas\" # with 
open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: # myfile.write(referenceName+\"", "an array of np.arrays with the modes specified by the indices in excludeFirstK,", "print \"already reached RMSD = 1 at i:\", i # raw_input() # continue", "atoms from the unbound structure mob_chain: The overall matched chain atoms from the", "speedup other calculations continue if guard < self.utils.config.guard: # calculate betas ## new", "# betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] # elif not preconceived: #", "betas should be calculated listofPreviousBetas: the list of previously calculated betas anmTuple: anm", "\"modesToConsider, status: \", modesToConsider, status return betas @timeout() def obtainLstSqBetasGeneral(self, anm, defvec, MTMfull,", "= Marray.shape[1] #MTM = self.setupMTMforBetas(anm_slc[0]) Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) betasListWhole", ": Moal, <NAME>., and <NAME>. \"SwarmDock and the Use of Normal Modes in", "= np.array(L_RMSReductions, dtype=np.float64) deformationSnapshots[\"proteinTo\"] = ref_chain.copy() return RMSDReductions, overlap, stepPointsReduction, L_RMSReductions, deformationSnapshots def", "= self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2]", "betasListWhole, anm_slc) except TimeoutError: print \"RMSD timeout at modes\", i,\"using previous betas\" with", "oliwa ''' from prody.measure.measure import calcDeformVector import numpy as np from prody.dynamics.compare import", "+ TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]): # store betas and", "[] overlap = [] numModes = Marray.shape[1] #MTM = self.setupMTMforBetas(anm_slc[0]) Mtrans = Marray.T", "Tdefvec) \", MTM, np.dot(Mtrans, Tdefvec) betas, 
status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] print", "obtained RMSD values \"\"\" print \"anm_slc[0].getArray(): \", anm_slc[0][0:2].getArray().shape RMSDReductions = [] overlap =", "range(0, len(indicesOfHighest)): M = np.dstack((M, anm_slc[indicesOfHighest[j]].getArray())) # print \"highe \",j,\" M: \", M", "mob_chain) RMSDReductions = [] overlap = [] Mtrans = M.T MTM = np.dot(Mtrans,", "print \"len(M): \", M.shape print \"len(Tdefvec): \", len(Tdefvec) raise ValueError(\"Cannot calculate betas, len(M)", "self.getL_RMS(mob_chain, ref_chain, self.utils.config.investigationsOn) if L_RMSD_after_Tapprox < initial_L_RMS: L_RMSReductions.append(L_RMSD_after_Tapprox) else: L_RMSReductions.append(initial_L_RMS) print \"first mode", "if i > self.utils.config.stopRMSDReductionAt or i > numModes: # temporary, to speedup other", "returning overlap 0\" currentOverlap = 0 return RMSD_after_Tapprox, currentOverlap, betas @timeout() def obtainLstSqBetas(self,", "len(indicesOfHighest)): M = np.dstack((M, anm_slc[indicesOfHighest[j]].getArray())) # print \"highe \",j,\" M: \", M return", "status, \" skipped\" return initialGuess print \"modesToConsider, status: \", modesToConsider, status return betas", "matrix inverse times # the ANM matrix # modesToConsider: up to how many", "matched atoms ref_chain: The overall matched chain atoms from the unbound structure mob_chain:", "modesToConsider, status # else: # # how many modes could be calculated on", "= anm Tdefvec = defvec.getArray() if len(M) != len(Tdefvec): print \"len(M): \", M.shape", "myfile: myfile.write(referenceName+\" RMSD timeout at modes \" +str(i)+\" using previous betas\\n \") betas", "1)) #print \"M: \", M for i in range(0, len(Mbefore)): M[i] = Mbefore[i]", "maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] print \"modesToConsider, status: \", M.shape[1], status return betas # def obtainLstSqBetasByCollectivity(self,", "of Normal Modes in Protein-Protein Docking.\" 
International Journal of Molecular Sciences 11, no.", "\"status == \", status, \" skipped\" # return initialGuess # print \"modesToConsider, status:", "number of modes to successively calculate the RMSD reductions on referenceName: the name", "structure defvec: the deformation vector stepPointsReduction: list of number of modes to successively", "\", modesToConsider, status return betas @timeout() def obtainLstSqBetasGeneralizedExpanding(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas,", "self.utils.config.guard: # calculate betas try: betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole, anm_slc)", "atoms from the bound structure defvec: the deformation vector Returns: RMSDReductions: The reduction", "has a numerical problem\" if overlap: overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap) else:", "the beta calculation was not successful betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) # cast objects overlap", "\"\"\" initialGuess = listofPreviousBetas[-1] initialGuess = np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider+1)])", "list of previously calculated betas anmTuple: anm tuple as generated by Prody preconceived:", "betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] else: betas, status =", "and RMSD reduction results betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox) # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec)", "structure nonTrivialModes = maxModes #(maxModes[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes) if", "reference Returns: RMSDReductions: The reduction list of obtained RMSD values \"\"\" #print \"Marray:", "coefficents \"\"\" M = anm.getArray() #print \"first M original: \", M 
Tdefvec =", "the beta coefficents # \"\"\" # ### old # ### M = anm.getArray()", "= 0 overlap.append(currentOverlap) else: # else guard is >= self.utils.config.guard, and the RMSD", "status return betas # def obtainLstSqBetasByCollectivity(self, M, defvec, MTMfull, modesToConsider, listofPreviousBetas, anmTuple, preconceived=False):", "MTM = self.setupMTMforBetas(anm_slc[0]) betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, anm_slc[0].numModes()) guard =", "np.dot(Mtrans, M) return MTM def calcRMSDReductions(self, anm_slc, ref_chain, mob_chain, defvec): \"\"\" Calculate a", "status: \", modesToConsider, nonTrivialModes, \"status == \", status, \" skipped\" return initialGuess print", "0: previousBetas = [0] else: previousBetas = previousBetas[-1] if len(previousOverlap) == 0: previousOverlap", "or np.isinf(RMSD_after_Tapprox) or previousRMSD < RMSD_after_Tapprox: print \"RMSD_after_Tapprox has a numerical problem, maybe", "= np.dstack((M, np.dstack(arrCopy)[0][i])) #print \"M in keep first \"+str(i)+\": \", M return M[0]", "# how many modes could be calculated on this structure nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3)", "RMSDReductions.append(RMSD_after_Tapprox) # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or np.isinf(currentOverlap): print", "ANM matrix inverse times the ANM matrix modesToConsider: up to how many modes", "too low, increase the number maximalIter = self.utils.config.maxIterBetas if M.shape[1] == 1: betas,", "padded with 0.0 to reach the correct length \"\"\" initialGuess = listofPreviousBetas[-1] initialGuess", "print \"RMSD timeout at modes\", i,\"using previous betas\" with open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile:", "of obtained RMSD values \"\"\" print \"anm_slc[0].getArray(): \", anm_slc[0][0:2].getArray().shape RMSDReductions = [] overlap", "initialGuess def expandInitialGuess(self, 
listofPreviousBetas, modesToConsider): \"\"\" Create an initial guess vector, padded with", "debugging if the RMSD fitter timeouts Returns: RMSDReduction, overlap, betas \"\"\" Mtrans =", "to ref_chain Args: anm_slc: The sliced ANM, with the corresponding entries of the", "in Protein-Protein Docking.\" International Journal of # Molecular Sciences 11, no. 10 (September", "the bound structure defvec: the deformation vector stepPointsReduction: list of number of modes", "from an ANM object ref_chain: The overall matched chain atoms from the unbound", "RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsReverse(self, anm_slc, ref_chain, mob_chain,", "MTM, i, betasListWhole, numModes, preconceived=True) except TimeoutError: print \"RMSD timeout at modes\", i,", "shape as getArray from an ANM object proteinFrom: The overall matched chains of", "at \", i # else the previous LRMS was actually lower, the beta", "obtainLstSqBetas(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, anmTuple, preconceived=False): \"\"\" Obtain betas by a", "= range(0, k) arrCopy = arr.copy() if len(k) == 1: Mbefore = np.array(np.dstack(arrCopy)[0][0])", "index match the range 0 to n-1 print stepPointsReduction betasListWhole = [[0] *", "in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if i > self.utils.config.stopRMSDReductionAt or i > numModes: #", "if overlap: overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap) else: # else the previous", "myfile: myfile.write(referenceName+\" RMSD timeout at modes \" +str(Marray.shape[1])+\" using previous betas\\n \") betas", "except TimeoutError: print \"RMSD timeout at modes\", i, \"using previous betas\" with open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\",", "overlap.append(currentOverlap) guard = 0 else: print \"previous RMSD lower at \", i #", "= 
self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i, betasListWhole, anm_slc) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector =", "np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider+1)]) return initialGuess def expandInitialGuess(self, listofPreviousBetas, modesToConsider):", "np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2] if status != 0: print \"modesToConsider, nonTrivialModes,", "apply, # return RMSD of individual proteins instead return calcRMSD(proteinFrom, proteinTo) def calcRMSDReductionFromTo(self,", "the ANM with modes # defvec: the deformationvector # MTMfull: dot product of", "The previous overlap previousRMSD: The previous reduced RMSD defvec: the deformation vector from", "up to how many modes the betas should be calculated listofPreviousBetas: the list", "initialGuess = initialGuess[:maxModesOverall] return initialGuess def getInitialGuess(self, listofPreviousBetas, modesToConsider): \"\"\" Create an initial", "= Vector(Tapprox, \"Tapprox\") # apply Tapprox to a copy of the unbound structure", "= previousRMSD # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or np.isinf(currentOverlap):", "output debugging if the RMSD fitter timeouts Returns: RMSDReduction, overlap, betas \"\"\" Mtrans", "fitting, the formula is given in : # # Moal, <NAME>., and <NAME>.", "many modes are given to get the betas Returns: The initial guess vector", "\"Mmode: \", np.shape(Mmode) betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i, betasListWhole, anm_slc) Tapprox =", "Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1]) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to a", "calcRMSD from scipy.sparse.linalg import cg from timeout import timeout from timeout import TimeoutError", "i,\"using previous betas\" # with 
open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: # myfile.write(referenceName+\" RMSD timeout", "reduction list of obtained RMSD values \"\"\" RMSDReductions = [] overlap = []", "# # if modesToConsider < 1: # print \"using one column\" # betas,", "RMSD_after_Tapprox, currentOverlap, betas def RMSDReductionFixedset(self, Marray, proteinFrom, proteinTo, defvec, referenceName, filePrefix): \"\"\" One", "= self.getInitialGuess([0], Marray.shape[1]) Tapprox = np.dot(betas, Marray.T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply", "the indices in indicesOfHighest\"\"\" excludeFirstK = range(0, excludeFirstK) M = anm_slc[excludeFirstK[0]].getArray() #print \"initial", "ref_chain, mob_chain, defvec, stepPointsReduction, referenceName, filePrefix): \"\"\" Calculate a list of RMSD reductions", "myfile.write(referenceName+\" RMSD timeout at modes \" +str(Marray.shape[1])+\" using previous betas\\n \") betas =", "betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # betas, status =", "proteinFrom and proteinTo (they need to be chain matched). 
Args: proteinFrom: Deformed protein", "The reduction list of obtained RMSD values \"\"\" #print \"Marray: \", Marray[0:2] RMSDReductions", "[] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, numModes, initialStep=1) print \"stepPointsReduction: \", stepPointsReduction guard =", "# if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): # if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) ==", "structure and get the reduced RMSD ref_chain_copy = ref_chain.copy() ref_chain_copy.setCoords(ref_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox", "i in range(1, len(excludeFirstK)): M = np.dstack((M, anm_slc[excludeFirstK[i]].getArray())) # print \"first \",i,\" M:", "self.setupMTMforBetas(anm_slc[0]) betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, anm_slc[0].numModes()) guard = 0 for", "the reference, for output debugging purposes filePrefix: file prefix, for output debugging purposes", "self.obtainLstSqBetasGeneralized2(Marray, defvec, MTM) except TimeoutError: print \"RMSD timeout at modes\", Marray.shape[1],\" using previous", "are problematic\" RMSD_after_Tapprox = previousRMSD # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if", "with 0.0 values to the correct length. Args: listofPreviousBetas: the list of previously", "proteinToL) else: # else it is an investigation on individual proteins, L_RMS does", "previousBetas = [0] else: previousBetas = previousBetas[-1] if len(previousOverlap) == 0: previousOverlap =", "(they need to be chain matched). 
Args: proteinFrom: Deformed protein proteinTo: Target protein", "try: betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole, anm_slc) except TimeoutError: print \"RMSD", "initialStep=1) print \"stepPointsReduction: \", stepPointsReduction guard = 0 for i in stepPointsReduction: if", "# reduce every value by one to have the index match the range", "RMSD ref_chain_copy = ref_chain.copy() ref_chain_copy.setCoords(ref_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain) if RMSDReductions:", "anmTuple, preconceived=False): \"\"\" Obtain betas by a scipy optimizer fitting, the formula is", "Mtrans = Marray.T MTM = np.dot(Mtrans, Marray) if len(previousBetas) == 0: previousBetas =", "Marray) try: betas = self.obtainLstSqBetasGeneralized2(Marray, defvec, MTM) except TimeoutError: print \"RMSD timeout at", "Returns: the beta coefficents \"\"\" M = anm.getArray() #print \"first M original: \",", "= maxModes #(maxModes[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined):", "anm_slc) except TimeoutError: print \"RMSD timeout at modes\", i,\"using previous betas\" # with", "atoms from the bound structure defvec: the deformation vector referenceName: the name of", "towards proteinTo proteinFrom_copy = proteinFrom.copy() proteinFrom_copy.setCoords(proteinFrom_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(proteinFrom_copy, proteinTo) #", "\"modesToConsider, status: \", modesToConsider, status # else: # # how many modes could", "# # Returns: # the beta coefficents # \"\"\" # ### old #", "RMSDReductions, overlap, stepPointsReduction, L_RMSReductions, deformationSnapshots def getL_RMS(self, proteinFrom, proteinTo, investigationsOn): \"\"\" Get the", "matrix # modesToConsider: up to how many modes the betas should be calculated", "# 
initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) # if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): # if np.linalg.det(MTM)", "\"a\") as myfile: # myfile.write(referenceName+\" RMSD timeout at modes \" +str(i)+\" using previous", "at modes\", i,\"using previous betas\" with open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout", "betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole, anm_slc) except TimeoutError: print \"RMSD timeout", "should be calculated listofPreviousBetas: the list of previously calculated betas maxModes: the number", "[x*0.0 for x in range(len(initialGuess), modesToConsider)]) return initialGuess def calcRMSDReductionsAidedByCollectivity(self, collectivity, highestN, excludeFirstK,", "maxModesOverall): \"\"\" Create an initial guess vector, padded with 0.0 values to the", "np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsExpandingSet(self, Marray,", "proteinTo, investigationsOn): \"\"\" Get the L_RMS of proteinFrom and proteinTo (they need to", "np.isnan(currentOverlap) or np.isinf(currentOverlap): print \"overlap has a numerical problem\" if overlap: overlap.append(overlap[-1]) else:", "not Returns: the beta coefficents \"\"\" M = anm Tdefvec = defvec.getArray() if", "the correct length \"\"\" initialGuess = listofPreviousBetas initialGuess = np.append(initialGuess, [x*0.0 for x", "RMSD reduction results betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox) # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if", "modes \" +str(i)+\" using previous betas\\n \") betas = self.getInitialGuess(betasListWhole, i) Tapprox =", "np.dot(Mtrans, Marray) if len(previousBetas) == 0: previousBetas = [0] else: previousBetas = previousBetas[-1]", 
"Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # if status != 0: # print \"modesToConsider, nonTrivialModes,", "L_RMSReductions.append(L_RMSD_after_Tapprox) else: L_RMSReductions.append(initial_L_RMS) print \"first mode did not lower L_RMS\" # cast objects", "TimeoutError: print \"RMSD timeout at modes\", i, \"using previous betas\" with open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\")", "maxiter=maximalIter)[0:2] print \"modesToConsider, status: \", modesToConsider, status else: # how many modes could", "modesToConsider, nonTrivialModes, \"status == \", status, \" skipped\" # return initialGuess # print", "need to be chain matched). Args: proteinFrom: Deformed protein proteinTo: Target protein (target", "structure defvec: the deformation vector referenceName: the name of the reference Returns: RMSDReductions:", "is being deformed towards defvec: the deformation vector from proteinFrom to proteinTo referenceName:", "was actually lower, the beta calculation was not successful guard += 1 betasListWhole.append(betasListWhole[-1])", "filePrefix, for output debugging if the RMSD fitter timeouts Returns: RMSDReduction \"\"\" Mtrans", "Moal, <NAME>., and <NAME>. 
\"SwarmDock and the Use of Normal Modes in Protein-Protein", "overlap = [] numModes = Marray.shape[1] Mtrans = Marray.T MTM = np.dot(Mtrans, Marray)", "if previousBetas is not None: initialGuess = self.expandInitialGuess(previousBetas, M.shape[1]) betas, status = cg(MTM,", "try: betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes, preconceived=True) except TimeoutError: print", "modesToConsider) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) == -0.0:", "previousRMSD = calcRMSD(proteinFrom, proteinTo) else: previousRMSD = previousRMSD[-1] try: betas = self.obtainLstSqBetasGeneralized2(Marray, defvec,", "the bound structure defvec: the deformation vector referenceName: the name of the reference", "reached RMSD = 1 at i:\", i # raw_input() # continue if guard", "[] MTM = self.setupMTMforBetas(anm_slc[0]) betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, anm_slc[0].numModes()) guard", "ref_chain.copy() return RMSDReductions, overlap, stepPointsReduction, L_RMSReductions, deformationSnapshots def getL_RMS(self, proteinFrom, proteinTo, investigationsOn): \"\"\"", "modes, same shape as getArray from an ANM object proteinFrom: The overall matched", "betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox) # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or np.isinf(currentOverlap):", "optimizer fitting, the formula is given in : Moal, <NAME>., and <NAME>. 
\"SwarmDock", "print \"Mmode: \", np.shape(Mmode) betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i, betasListWhole, anm_slc) Tapprox", "M, defvec, MTMfull, modesToConsider, listofPreviousBetas, anmTuple, preconceived=False): # \"\"\" Obtain betas by a", "preconceived=True) except TimeoutError: print \"RMSD timeout at modes\", i, \"using previous betas\" with", "the beta coefficents \"\"\" M = anm Tdefvec = defvec.getArray() if len(M) !=", "ref_chain, self.utils.config.investigationsOn) if L_RMSD_after_Tapprox < initial_L_RMS: L_RMSReductions.append(L_RMSD_after_Tapprox) else: L_RMSReductions.append(initial_L_RMS) print \"first mode did", "RMSD reduction run, no need to compare against previous RMSD # store betas", "= np.dot(betas[0:i+1], Marray.T[0:i+1]) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to a copy", "reduced RMSD mob_chain_copy = mob_chain.copy() mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) L_RMSD_after_Tapprox", "\", modesToConsider, nonTrivialModes, \"status == \", status, \" skipped\" # return initialGuess #", "continue if guard < self.utils.config.guard: # calculate betas try: betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec,", "self.utils.config.guard: # calculate betas try: betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes)", "\" +str(i)+\" using previous betas\\n \") betas = self.getInitialGuessExpanding(betasListWhole, i, numModes) Tapprox =", "are already too close or the mode vectors are problematic\" if previousOverlap: currentOverlap", "formula is given in : Moal, <NAME>., and <NAME>. 
\"SwarmDock and the Use", "calcRMSDReductionsReverseGeneral(self, Marray, ref_chain, mob_chain, defvec, referenceName, filePrefix): \"\"\" Calculate a list of RMSD", "or np.isinf(RMSD_after_Tapprox): print \"RMSD_after_Tapprox has a numerical problem, maybe the two structures are", "preconceived: initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter,", "the L_RMS of proteinFrom and proteinTo (they need to be chain matched). Args:", "# temporary, to speedup other calculations continue # calculate betas try: betas =", "stepPointsReduction def calcRMSDReductionsReverse(self, anm_slc, ref_chain, mob_chain, defvec, referenceName, filePrefix): \"\"\" Calculate a list", "ref_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]): # store betas and RMSD reduction results betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox)", "of proteinFrom and get the RMSD towards proteinTo proteinFrom_copy = proteinFrom.copy() proteinFrom_copy.setCoords(proteinFrom_copy.getCoords() +", "an ANM object proteinFrom: The overall matched chains of the protein to deform", "calcRMSD(mob_chain, ref_chain) if RMSD_after_Tapprox < initial_RMSD: RMSDReductions.append(RMSD_after_Tapprox) else: RMSDReductions.append(initial_RMSD) print \"first mode did", "could be calculated on this structure nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) - 6 initialGuess =", "(September 28, 2010): 3623-3648. doi:10.3390/ijms11103623. Args: M: the modes array defvec: the deformation", "# # Args: # anm: the ANM with modes # defvec: the deformationvector", "by the indices in excludeFirstK, and the following modes as given by the", "Returns: # the beta coefficents # \"\"\" # ### old # ### M", "an initial guess vector, padded with 0.0 values to the correct length. 
Args:", "M = anm.getArray() # # Tdefvec = defvec.getArray() # #print \"shape(Tdefvec): \", np.shape(Tdefvec)", "maxiter=maximalIter)[0:2] else: if previousBetas is not None: initialGuess = self.expandInitialGuess(previousBetas, M.shape[1]) betas, status", "= self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): if np.linalg.det(MTM) == 0.0 or", "@timeout() def obtainLstSqBetasGeneralized2(self, M, defvec, MTM, previousBetas=None): \"\"\" Obtain betas by a scipy", "Args: M: the modes array defvec: the deformation vector MTM: dot product of", "# MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM # maximalIter = self.utils.config.maxIterBetas #", "# betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # print \"modesToConsider,", "continue # elif RMSDReductions and (RMSDReductions[-1] == 1): # # we already reached", "status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] print \"modesToConsider, status: \", M.shape[1], status", "self.utils.config.investigationsOn) if L_RMSD_after_Tapprox < initial_L_RMS: L_RMSReductions.append(L_RMSD_after_Tapprox) else: L_RMSReductions.append(initial_L_RMS) print \"first mode did not", "np.dot(Mtrans, M) betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, anm_slc[0].numModes()) guard = 0", "!= 0: print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"status == \", status,", "betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] print \"modesToConsider, status: \", M.shape[1],", "= cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] print \"modesToConsider, status: \", M.shape[1], status 
return", "np.dstack((M, anm_slc[indicesOfHighest[j]].getArray())) # print \"highe \",j,\" M: \", M return M[0] def getModeArrayKeepingFirstK(self,", "a scipy optimizer fitting, the formula is given in : Moal, <NAME>., and", "= 1 at i:\", i # raw_input() # continue if guard < self.utils.config.guard:", "tol=self.utils.config.precisionBetaFitting)[0:2] else: betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] print \"modesToConsider, status:", "list of previously calculated Betas modesToConsider: up to how many modes are given", "objects overlap = np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction", "dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def getModeArrayBasedOnIndices(self, anm_slc, excludeFirstK,", "except TimeoutError: print \"RMSD timeout at modes\", i, \"using previous betas\" # with", "betas and RMSD reduction results betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox) # calc overlap currentOverlap = calcOverlap(TapproxVector,", "as generated by Prody preconceived: has guard from config been reached or not", "import Vector from prody.measure.transform import calcRMSD from scipy.sparse.linalg import cg from timeout import", "collectivity, highestN, excludeFirstK, anm_slc, ref_chain, mob_chain): indicesOfHighest = self.utils.getIndiciesofHighestN(np.abs(collectivity), highestN, excludeFirstK) M =", "status: \", modesToConsider, nonTrivialModes, \"det(MTM) == 0, skipped\" return initialGuess betas, status =", "np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to a copy of", "\"det(MTM) == 0, skipped\" return initialGuess betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess,", "= [] overlap = [] MTM = 
self.setupMTMforBetas(anm_slc[0]) betasListWhole = [] stepPointsReduction =", "M in keep first k: \", M for i in range(1, len(k)): M", "betas try: betas = self.obtainLstSqBetasGeneralizedExpanding(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes) except TimeoutError: print", "to how many modes the betas should be calculated # # Returns: #", "<NAME>. \"SwarmDock and the Use of Normal # Modes in Protein-Protein Docking.\" International", "the number maximalIter = self.utils.config.maxIterBetas if M.shape[1] == 1: betas, status = cg(MTM,", "cast objects overlap = np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap,", "return betas @timeout() def obtainLstSqBetasGeneralizedExpanding(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, maxModes, preconceived=False): \"\"\"", "self.utils.config.investigationsOn) deformationSnapshots[i] = mob_chain_copy.copy() if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]: # store betas", "print \"RMSD timeout at modes\", i, \"using previous betas\" # with open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\", \"a\")", "TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]): # store betas and RMSD", "11, no. 10 (September 28, 2010): 3623-3648. doi:10.3390/ijms11103623. 
Args: M: the modes array", "print \"modesToConsider, status: \", modesToConsider, status elif not preconceived: initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider)", "output debugging if the RMSD fitter timeouts Returns: RMSDReduction \"\"\" Mtrans = Marray.T", "up to how many modes the betas should be calculated # # Returns:", "RMSDReductions[-1]: # store betas and RMSD reduction results betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox) # calc overlap", "\"shape(M): \", np.shape(M) if len(M) != len(Tdefvec): raise ValueError(\"Cannot calculate betas, len(M) !=", "= M.T MTM = np.dot(Mtrans, M) return MTM def calcRMSDReductions(self, anm_slc, ref_chain, mob_chain,", "RMSDReductions, overlap, stepPointsReduction def getModeArrayBasedOnIndices(self, anm_slc, excludeFirstK, indicesOfHighest): \"\"\" Create an array of", "of modes, that are combined in a linear combination with betas. Args: anm_slc:", "betas. RMSD change from mob_chain to ref_chain Args: Marray: Array of normal modes,", "anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, anmTuple, preconceived=False): \"\"\" Obtain betas by a scipy", "+str(i)+\" using previous betas\\n \") betas = self.getInitialGuess(betasListWhole, i) Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1])", "proteins instead return calcRMSD(proteinFrom, proteinTo) def calcRMSDReductionFromTo(self, Marray, proteinFrom, proteinTo, defvec, previousBetas, previousOverlap,", "The sliced ANM, with the corresponding entries of the eigenvectors towards the matched", "L_RMSReductions: if L_RMSD_after_Tapprox < L_RMSReductions[-1]: L_RMSReductions.append(L_RMSD_after_Tapprox) else: print \"previous L_RMS lower at \",", "i # else the previous LRMS was actually lower, the beta calculation was", "defvec = calcDeformVector(ref_chain, mob_chain) RMSDReductions = [] overlap = [] Mtrans = M.T", "def calcRMSDReductionFromTo(self, Marray, proteinFrom, proteinTo, defvec, previousBetas, previousOverlap, 
previousRMSD, referenceName, filePrefix): \"\"\" Calculate", "MTM, i, betasListWhole, anm_slc, preconceived=True) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox, \"Tapprox\")", "vectors are problematic, returning overlap 0\" currentOverlap = 0 return RMSD_after_Tapprox, currentOverlap, betas", "RMSDReductions: The reduction list of obtained RMSD values \"\"\" #print \"Marray: \", Marray[0:2]", "referenceName: the name of the reference, for output debugging if the RMSD fitter", "def obtainLstSqBetasGeneralizedExpanding(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, maxModes, preconceived=False): \"\"\" Obtain betas by", "Tdefvec) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] print \"modesToConsider, status: \", modesToConsider,", "2010): 3623-3648. # doi:10.3390/ijms11103623. # # Args: # anm: the ANM with modes", "= np.dot(betas, Marray.T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to a copy", "= 0 else: print \"previous RMSD lower at \", i # else the", "how many modes could be calculated on this structure nonTrivialModes = maxModes #(maxModes[1].select('calpha').numAtoms()*3)", "should be calculated, starting from 0 to n-1 listofPreviousBetas: the list of previously", "overlap, stepPointsReduction def calcRMSDReductionsReverse(self, anm_slc, ref_chain, mob_chain, defvec, referenceName, filePrefix): \"\"\" Calculate a", "= MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM # maximalIter = self.utils.config.maxIterBetas # # if", "i) betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i, betasListWhole, anm_slc, preconceived=True) Tapprox = np.dot(betas[0:i+1],", "print \"modesToConsider, status: \", modesToConsider, status return betas @timeout() def obtainLstSqBetasGeneralizedExpanding(self, anm, defvec,", "proteinFrom and get the RMSD towards proteinTo proteinFrom_copy = proteinFrom.copy() 
proteinFrom_copy.setCoords(proteinFrom_copy.getCoords() + TapproxVector.getArrayNx3())", "chain matched). Args: proteinFrom: Deformed protein proteinTo: Target protein (target of the deformation", "len(Tdefvec): print \"len(M): \", M.shape print \"len(Tdefvec): \", len(Tdefvec) raise ValueError(\"Cannot calculate betas,", "reductions on referenceName: the name of the reference, for output debugging purposes filePrefix:", "print \"RMSD timeout at modes\", i, \"using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as", "overlap = np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) L_RMSReductions = np.array(L_RMSReductions, dtype=np.float64) deformationSnapshots[\"proteinTo\"]", "return MTM def calcRMSDReductions(self, anm_slc, ref_chain, mob_chain, defvec): \"\"\" Calculate a list of", "at modes\", Marray.shape[1],\" using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD", "reduction. 
Args: Marray: Array of normal modes, same shape as getArray from an", "M = anm Tdefvec = defvec.getArray() if len(M) != len(Tdefvec): print \"len(M): \",", "ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") # Mtrans = M.T # MTM =", "= self.utils.config.maxIterBetas if M.shape[1] == 1: betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2]", "initialGuess = listofPreviousBetas initialGuess = np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider)]) return", "instead of anm_slc and then [][] Mmode = self.getModeArrayKeepingFirstK(M, i) print \"Mmode: \",", "product of the ANM matrix inverse times the ANM matrix previousBetas: previously calculated", "RMSD reduction should go preconceived # calculate betas Mmode = self.getModeArrayKeepingFirstK(M, i) betas", "product of the full ANM matrix inverse times the ANM matrix modesToConsider: up", "if status != 0: print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"status ==", "apply Tapprox to a copy of proteinFrom and get the RMSD towards proteinTo", "anm_slc[excludeFirstK[i]].getArray())) # print \"first \",i,\" M: \", M for j in range(0, len(indicesOfHighest)):", "and get the reduced RMSD mob_chain_copy = mob_chain.copy() mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox =", "the eigenvectors towards the matched atoms ref_chain: The overall matched chain atoms from", "nonTrivialModes, \"det(MTM) == 0, skipped\" # return initialGuess # betas, status = cg(MTM,", "# the ANM matrix # modesToConsider: up to how many modes the betas", "def getL_RMS(self, proteinFrom, proteinTo, investigationsOn): \"\"\" Get the L_RMS of proteinFrom and proteinTo", "+ TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(proteinFrom_copy, proteinTo) # RMSD comparison if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox):", "could be calculated on this structure nonTrivialModes = 
maxModes #(maxModes[1].select('calpha').numAtoms()*3) - 6 initialGuess", "\", M Tdefvec = defvec.getArray() #print \"shape(Tdefvec): \", np.shape(Tdefvec) #print \"shape(M): \", np.shape(M)", "the RMSD reduction should go preconceived # calculate betas try: betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T,", "proteinTo proteinFrom_copy = proteinFrom.copy() proteinFrom_copy.setCoords(proteinFrom_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(proteinFrom_copy, proteinTo) # RMSD", "betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, anm_slc[0].numModes()) guard = 0 for i", "close or the mode vectors are problematic, returning original RMSD\" RMSD_after_Tapprox = calcRMSD(proteinFrom,", "modesToConsider, status # return betas def getInitialGuessExpanding(self, listofPreviousBetas, modesToConsider, maxModesOverall): \"\"\" Create an", "= self.obtainLstSqBetasGeneralizedExpanding(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes) except TimeoutError: print \"RMSD timeout at", "i) Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1]) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to", "at modes\", i, \"using previous betas\" with open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD", "vectors are problematic\" if previousOverlap: currentOverlap = previousOverlap else: currentOverlap = 0 return", "= np.dot(Mtrans, Marray) if len(previousBetas) == 0: previousBetas = [0] else: previousBetas =", "betas = self.getInitialGuess([0], Marray.shape[1]) Tapprox = np.dot(betas, Marray.T) TapproxVector = Vector(Tapprox, \"Tapprox\") #", "betasListWhole = [[0] * stepPointsReduction[0]] deformationSnapshots = OrderedDict() deformationSnapshots[\"proteinFrom\"] = mob_chain.copy() for i", "== 0: previousRMSD = calcRMSD(proteinFrom, proteinTo) else: previousRMSD = previousRMSD[-1] try: betas =", "guard += 1 
betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else: # else it is the first", "reduction run, store betas and RMSD reduction results initial_RMSD = calcRMSD(mob_chain, ref_chain) if", "nonTrivialModes, \"det(MTM) == 0, skipped\" return initialGuess betas, status = cg(MTM, np.dot(Mtrans, Tdefvec),", "betas\\n \") betas = self.getInitialGuess(betasListWhole, i) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox,", "tol=self.utils.config.precisionBetaFitting)[0:2] # betas, status = lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2] if", "\"modesToConsider, status: \", modesToConsider, status elif not preconceived: initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) betas,", "# if len(M) != len(Tdefvec): # raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\")", "if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) == -0.0: print \"modesToConsider, nonTrivialModes, status: \",", "self.utils.config.stopRMSDReductionAt: if i > self.utils.config.stopRMSDReductionAt: # temporary, to speedup other calculations continue if", "nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) - 6 # initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) # if modesToConsider", "= anm.getArray() Mtrans = M.T MTM = np.dot(Mtrans, M) return MTM def calcRMSDReductions(self,", "the number of modes preconceived: has guard from config been reached or not", "= np.dot(Mtrans, M) betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, anm_slc[0].numModes()) guard =", "np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] else: if previousBetas is not None: initialGuess = self.expandInitialGuess(previousBetas, M.shape[1])", "RMSD_after_Tapprox = calcRMSD(proteinFrom_copy, proteinTo) # 
RMSD comparison if previousRMSD: if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox)", "filePrefix): \"\"\" One shot calculation for the RMSD reduction. Args: Marray: Array of", "fitter previousOverlap: The previous overlap previousRMSD: The previous reduced RMSD defvec: the deformation", "RMSD reductions based increasing number of modes, that are combined in a linear", "def calcRMSDReductionsReverse(self, anm_slc, ref_chain, mob_chain, defvec, referenceName, filePrefix): \"\"\" Calculate a list of", "Modes in Protein-Protein Docking.\" International Journal of Molecular Sciences 11, no. 10 (September", "betas try: betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole, anm_slc, preconceived=True) except TimeoutError:", "that are combined in a linear combination with betas. RMSD change from mob_chain", "beta coefficents \"\"\" M = anm Tdefvec = defvec.getArray() if len(M) != len(Tdefvec):", "L_RMS lower at \", i # else the previous LRMS was actually lower,", "self.utils.config.stopRMSDReductionAt or i > numModes: # temporary, to speedup other calculations continue #", "@timeout() def obtainLstSqBetasGeneral(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, maxModes, preconceived=False): \"\"\" Obtain betas", "RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]: # store betas and RMSD reduction results betasListWhole.append(betas)", "previously calculated betas maxModes: the number of modes preconceived: has guard from config", "(nonTrivialModes+self.utils.config.goOverdetermined): if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) == -0.0: print \"modesToConsider, nonTrivialModes, status:", "1.0 # betasListWhole.append(betasListWhole[-1]) # RMSDReductions.append(RMSDReductions[-1]) # overlap.append(overlap[-1]) # print \"already reached RMSD =", "6 initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): if 
np.linalg.det(MTM) == 0.0", "MTM def calcRMSDReductions(self, anm_slc, ref_chain, mob_chain, defvec): \"\"\" Calculate a list of RMSD", "= anm.getArray() # # Tdefvec = defvec.getArray() # #print \"shape(Tdefvec): \", np.shape(Tdefvec) #", "return initialGuess print \"modesToConsider, status: \", modesToConsider, status return betas @timeout() def obtainLstSqBetasGeneralizedExpanding(self,", "store betas and RMSD reduction results betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox) # calc overlap currentOverlap =", "#print \"shape(Tdefvec): \", np.shape(Tdefvec) #print \"shape(M): \", np.shape(M) if len(M) != len(Tdefvec): print", "RMSD_after_Tapprox < initial_RMSD: RMSDReductions.append(RMSD_after_Tapprox) else: RMSDReductions.append(initial_RMSD) print \"first mode did not lower RMSD\"", "proteinTo \"\"\" if investigationsOn == \"Complex\": proteinFromL = proteinFrom.select('segment \\\"L.\\\"') proteinToL = proteinTo.select('segment", "MTM, i, betasListWhole, anm_slc) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox, \"Tapprox\") #", "x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # if status != 0: # print \"modesToConsider, nonTrivialModes, status:", "change from mob_chain to ref_chain Args: Marray: Array of normal modes, same shape", "\"first mode did not lower RMSD\" betasListWhole.append(betas) # calc overlap currentOverlap = calcOverlap(TapproxVector,", "Args: Marray: Array of normal modes, same shape as getArray from an ANM", "0 overlap.append(currentOverlap) else: # else guard is >= self.utils.config.guard, and the RMSD reduction", "MTM, np.dot(Mtrans, Tdefvec) \", MTM, np.dot(Mtrans, Tdefvec) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec),", "= self.getModeArrayKeepingFirstK(M, i) print \"Mmode: \", np.shape(Mmode) betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i,", "betas = self.getInitialGuess(betasListWhole, i) Tapprox = 
np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox, \"Tapprox\") #", "lower, the beta calculation was not successful betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) # cast objects", "unbound structure mob_chain: The overall matched chain atoms from the bound structure defvec:", "from prody.measure.transform import calcRMSD from scipy.sparse.linalg import cg from timeout import timeout from", "overlap: overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap) else: # else the previous RMSD", "protein which is being deformed towards previousBetas: The previous betas, serves as part", "listofPreviousBetas: the list of previously calculated betas maxModes: the number of modes preconceived:", "RMSDReducer contains method to reduce the RMSD between proteins. ''' def __init__(self, utils):", "M.T MTM = np.dot(Mtrans, M) return MTM def calcRMSDReductions(self, anm_slc, ref_chain, mob_chain, defvec):", "M.shape print \"len(Tdefvec): \", len(Tdefvec) raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") Mtrans", "betas ## new Mmode instead of anm_slc and then [][] Mmode = self.getModeArrayKeepingFirstK(M,", "if RMSD_after_Tapprox < RMSDReductions[-1]: # store betas and RMSD reduction results betasListWhole.append(betas) RMSDReductions.append(RMSD_after_Tapprox)", "ANM with modes defvec: the deformationvector MTMfull: dot product of the full ANM", "array of np.arrays with the modes specified by the indices in excludeFirstK, and", "investigationsOn == \"Complex\": proteinFromL = proteinFrom.select('segment \\\"L.\\\"') proteinToL = proteinTo.select('segment \\\"L.\\\"') return calcRMSD(proteinFromL,", "by the indices in indicesOfHighest\"\"\" excludeFirstK = range(0, excludeFirstK) M = anm_slc[excludeFirstK[0]].getArray() #print", "is an investigation on individual proteins, L_RMS does not apply, # return RMSD", "< self.utils.config.guard: # 
calculate betas try: betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole,", "RMSD_after_Tapprox: print \"RMSD_after_Tapprox has a numerical problem, maybe the two structures are already", "nonTrivialModes = maxModes #(maxModes[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes) if modesToConsider", "[x*0.0 for x in range(len(initialGuess), modesToConsider+1)]) return initialGuess def expandInitialGuess(self, listofPreviousBetas, modesToConsider): \"\"\"", "= Marray.T MTM = np.dot(Mtrans, Marray) try: betas = self.obtainLstSqBetasGeneralized2(Marray, defvec, MTM) except", "x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # betas, status = lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0,", "status return betas @timeout() def obtainLstSqBetasGeneral(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, maxModes, preconceived=False):", "RMSDReducer(object): ''' The RMSDReducer contains method to reduce the RMSD between proteins. 
'''", "#(maxModes[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): if", "anmTuple, preconceived=False): # \"\"\" Obtain betas by a scipy optimizer fitting, the formula", "\"using one column\" # betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] # elif", "MTMfull, modesToConsider, listofPreviousBetas, anmTuple, preconceived=False): # \"\"\" Obtain betas by a scipy optimizer", "else: L_RMSReductions.append(initial_L_RMS) print \"first mode did not lower L_RMS\" # cast objects overlap", "of the reference, for output debugging if the RMSD fitter timeouts filePrefix: filePrefix,", "\") betas = self.getInitialGuess(betasListWhole, i) Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1]) TapproxVector = Vector(Tapprox, \"Tapprox\")", "MTM # maximalIter = self.utils.config.maxIterBetas # # if modesToConsider < 1: # print", "= range(0, excludeFirstK) M = anm_slc[excludeFirstK[0]].getArray() #print \"initial M: \", M for i", "a list of RMSD reductions based increasing number of modes, that are combined", "\\\"L.\\\"') return calcRMSD(proteinFromL, proteinToL) else: # else it is an investigation on individual", "0 else: print \"previous RMSD lower at \", i # else the previous", "reduced RMSD ref_chain_copy = ref_chain.copy() ref_chain_copy.setCoords(ref_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain) if", "temporary, to speedup other calculations continue # elif RMSDReductions and (RMSDReductions[-1] == 1):", "i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if i > self.utils.config.stopRMSDReductionAt: # temporary, to speedup", "the protein to deform towards proteinTo proteinTo: The overall matched chains of the", "\"det(MTM) == 0, skipped\" # return initialGuess # betas, status = cg(MTM, np.dot(Mtrans,", 
"calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or np.isinf(currentOverlap): print \"overlap has a numerical problem\" if", "proteinFrom_copy = proteinFrom.copy() proteinFrom_copy.setCoords(proteinFrom_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(proteinFrom_copy, proteinTo) # RMSD comparison", "was actually lower, the beta calculation was not successful L_RMSReductions.append(L_RMSReductions[-1]) else: # else", "status: \", modesToConsider, status elif not preconceived: initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes) betas,", "import timeout from timeout import TimeoutError from collections import OrderedDict class RMSDReducer(object): '''", "betasListWhole, anm_slc) except TimeoutError: print \"RMSD timeout at modes\", i,\"using previous betas\" #", "timeout from timeout import TimeoutError from collections import OrderedDict class RMSDReducer(object): ''' The", "# use pre-calculated MTM # maximalIter = self.utils.config.maxIterBetas # # if modesToConsider <", "len(Tdefvec) raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") Mtrans = M.T MTM =", "def getInitialGuessExpanding(self, listofPreviousBetas, modesToConsider, maxModesOverall): \"\"\" Create an initial guess vector, padded with", "+= 1 betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else: # else it is the first RMSD", "previousOverlap: The previous overlap previousRMSD: The previous reduced RMSD defvec: the deformation vector", "cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] # if status != 0: # print", "guard is >= self.utils.config.guard, and the RMSD reduction should go preconceived # calculate", "and then [][] Mmode = self.getModeArrayKeepingFirstK(M, i) print \"Mmode: \", np.shape(Mmode) betas =", "Jan 24, 2014 @author: oliwa ''' from prody.measure.measure 
import calcDeformVector import numpy as", "OrderedDict() deformationSnapshots[\"proteinFrom\"] = mob_chain.copy() for i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if i >", "beta calculation was not successful L_RMSReductions.append(L_RMSReductions[-1]) else: # else it is the first", "\"highe \",j,\" M: \", M return M[0] def getModeArrayKeepingFirstK(self, arr, k): k +=", "indicesOfHighest = self.utils.getIndiciesofHighestN(np.abs(collectivity), highestN, excludeFirstK) M = self.getModeArrayBasedOnIndices(anm_slc[0], excludeFirstK, indicesOfHighest) defvec = calcDeformVector(ref_chain,", "first RMSD reduction run, store betas and RMSD reduction results initial_RMSD = calcRMSD(mob_chain,", "be calculated # # Returns: # the beta coefficents # \"\"\" # ###", "timeout at modes \" +str(i)+\" using previous betas\\n \") betas = self.getInitialGuessExpanding(betasListWhole, i,", "= self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole, anm_slc) except TimeoutError: print \"RMSD timeout at", "calculated, starting from 0 to n-1 listofPreviousBetas: the list of previously calculated betas", "calculations continue if guard < self.utils.config.guard: # calculate betas ## new Mmode instead", "# cast objects overlap = np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) L_RMSReductions =", "modes\", i, \"using previous betas\" # with open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: # myfile.write(referenceName+\"", "the full ANM matrix inverse times # the ANM matrix # modesToConsider: up", "Tapprox to a copy of the bound structure and get the reduced RMSD", "def getInitialGuess(self, listofPreviousBetas, modesToConsider): \"\"\" Create an initial guess vector, padded with 0.0", "RMSD reductions on referenceName: the name of the reference, for output debugging purposes", "to have the index match the range 0 to n-1 print stepPointsReduction betasListWhole", "10, 
anm_slc[0].numModes()) guard = 0 for i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if i", "contains method to reduce the RMSD between proteins. ''' def __init__(self, utils): '''", "self.getL_RMS(mob_chain_copy, ref_chain, self.utils.config.investigationsOn) deformationSnapshots[i] = mob_chain_copy.copy() if RMSDReductions: if RMSD_after_Tapprox < RMSDReductions[-1]: #", "other calculations continue # elif RMSDReductions and (RMSDReductions[-1] == 1): # # we", "ref_chain Args: anm_slc: The sliced ANM, with the corresponding entries of the eigenvectors", "= self.setupMTMforBetas(anm_slc[0]) betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, anm_slc[0].numModes()) guard = 0", "Mtrans = M.T MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM maximalIter = self.utils.config.maxIterBetas", "# Modes in Protein-Protein Docking.\" International Journal of # Molecular Sciences 11, no.", "\"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"status == \", status, \" skipped\" #", "= self.getInitialGuess(betasListWhole, i) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply", "numModes: # temporary, to speedup other calculations continue # calculate betas try: betas", "filePrefix: file prefix, for output debugging purposes Returns: RMSDReductions: The reduction list of", "self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] #", "= anm_slc[excludeFirstK[0]].getArray() #print \"initial M: \", M for i in range(1, len(excludeFirstK)): M", "M: \", M return M[0] def getModeArrayKeepingFirstK(self, arr, k): k += 1 k", "was not successful betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) # 
cast objects overlap = np.array(overlap, dtype=np.float64)", "ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") Mtrans = M.T # the default maxiter", "betas\" with open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at modes \" +str(i)+\"", "#print \"shape(M): \", np.shape(M) if len(M) != len(Tdefvec): raise ValueError(\"Cannot calculate betas, len(M)", "0 to n-1 print stepPointsReduction betasListWhole = [[0] * stepPointsReduction[0]] deformationSnapshots = OrderedDict()", "preconceived # calculate betas Mmode = self.getModeArrayKeepingFirstK(M, i) betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM,", "times the ANM matrix previousBetas: previously calculated betas Returns: the beta coefficents \"\"\"", "Returns: RMSDReductions: The reduction list of obtained RMSD values \"\"\" RMSDReductions = []", "as myfile: # myfile.write(referenceName+\" RMSD timeout at modes \" +str(i)+\" using previous betas\\n", "matrix modesToConsider: up to how many modes the betas should be calculated, starting", "maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] else: betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] print \"modesToConsider,", "np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsReverse(self, anm_slc, ref_chain, mob_chain, defvec, referenceName,", "\", anm_slc[0][0:2].getArray().shape RMSDReductions = [] overlap = [] MTM = self.setupMTMforBetas(anm_slc[0]) betasListWhole =", "default maxiter is too low, increase the number maximalIter = self.utils.config.maxIterBetas if M.shape[1]", "already too close or the mode vectors are problematic, returning overlap 0\" currentOverlap", "the default maxiter is too low, increase the number maximalIter = self.utils.config.maxIterBetas if", "\"RMSD timeout at modes\", i, 
\"using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile:", "indices in excludeFirstK, and the following modes as given by the indices in", "betas def RMSDReductionFixedset(self, Marray, proteinFrom, proteinTo, defvec, referenceName, filePrefix): \"\"\" One shot calculation", "L_RMSReductions[-1]: L_RMSReductions.append(L_RMSD_after_Tapprox) else: print \"previous L_RMS lower at \", i # else the", "previousRMSD: if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox) or previousRMSD < RMSD_after_Tapprox: print \"RMSD_after_Tapprox has a", "np.dot(Mtrans, Tdefvec) \", MTM, np.dot(Mtrans, Tdefvec) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2]", "i in range(0, len(Mbefore)): M[i] = Mbefore[i] return M elif len(arr[0]) == len(k):", "betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] print \"modesToConsider, status: \", modesToConsider, status", "\"len(Tdefvec): \", len(Tdefvec) raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") Mtrans = M.T", "!= len(Tdefvec)\") # Mtrans = M.T # MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated", "overlap, stepPointsReduction def calcRMSDReductionsReverseGeneral(self, Marray, ref_chain, mob_chain, defvec, referenceName, filePrefix): \"\"\" Calculate a", "the betas should be calculated listofPreviousBetas: the list of previously calculated betas maxModes:", "\", len(Tdefvec) raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") Mtrans = M.T #", "protein to deform towards proteinTo proteinTo: The overall matched chains of the protein", "be chain matched). 
Args: proteinFrom: Deformed protein proteinTo: Target protein (target of the", "or \"Individual\" Returns: L_RMS of proteinFrom and proteinTo \"\"\" if investigationsOn == \"Complex\":", "# # how many modes could be calculated on this structure # nonTrivialModes", "anm_slc) except TimeoutError: print \"RMSD timeout at modes\", i,\"using previous betas\" with open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\",", "defvec, MTMfull, modesToConsider, listofPreviousBetas, anmTuple, preconceived=False): # \"\"\" Obtain betas by a scipy", "np.dot(Mtrans, Marray) stepPointsReduction = stepPointsReduction - 1 # reduce every value by one", "getArray from an ANM object ref_chain: The overall matched chain atoms from the", "i) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply Tapprox to", "of proteinFrom and proteinTo \"\"\" if investigationsOn == \"Complex\": proteinFromL = proteinFrom.select('segment \\\"L.\\\"')", "atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2] print \"modesToConsider, status: \", modesToConsider, status else: # how", "from config been reached or not Returns: the beta coefficents \"\"\" M =", "np.array(np.dstack(arrCopy)[0][0]) M = np.zeros((len(Mbefore), 1)) #print \"M: \", M for i in range(0,", "# # Tdefvec = defvec.getArray() # #print \"shape(Tdefvec): \", np.shape(Tdefvec) # #print \"shape(M):", "!= len(Tdefvec): raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") Mtrans = M.T MTM", "and the following modes as given by the indices in indicesOfHighest\"\"\" excludeFirstK =", "actually lower, the beta calculation was not successful betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else: #", "previous betas\" # with open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: 
# myfile.write(referenceName+\" RMSD timeout at", "on Jan 24, 2014 @author: oliwa ''' from prody.measure.measure import calcDeformVector import numpy", "L_RMSD_after_Tapprox < initial_L_RMS: L_RMSReductions.append(L_RMSD_after_Tapprox) else: L_RMSReductions.append(initial_L_RMS) print \"first mode did not lower L_RMS\"", "len(previousOverlap) == 0: previousOverlap = 0 else: previousOverlap = previousOverlap[-1] if len(previousRMSD) ==", "values \"\"\" #print \"Marray: \", Marray[0:2] RMSDReductions = [] overlap = [] numModes", "MTM = np.dot(Mtrans, M) betasListWhole = [] stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, anm_slc[0].numModes()) guard", "mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]): # store betas", "dot product of the ANM matrix inverse times the ANM matrix previousBetas: previously", "''' Created on Jan 24, 2014 @author: oliwa ''' from prody.measure.measure import calcDeformVector", "(target of the deformation vector) investigationsON: \"Complex\" or \"Individual\" Returns: L_RMS of proteinFrom", "print \"previous L_RMS lower at \", i # else the previous LRMS was", "anm_slc: The sliced ANM, with the corresponding entries of the eigenvectors towards the", "proteinTo (they need to be chain matched). 
Args: proteinFrom: Deformed protein proteinTo: Target", "previousRMSD, referenceName, filePrefix): \"\"\" Calculate a list of RMSD reductions based increasing number", "of previously calculated betas anmTuple: anm tuple as generated by Prody preconceived: has", "self.getInitialGuess(listofPreviousBetas, modesToConsider) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) ==", "return initialGuess # print \"modesToConsider, status: \", modesToConsider, status # return betas def", "# initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) # betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess,", "defvec.getArray() #print \"shape(Tdefvec): \", np.shape(Tdefvec) #print \"shape(M): \", np.shape(M) if len(M) != len(Tdefvec):", "1 betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else: # else it is the first RMSD reduction", "print \"modesToConsider, status: \", modesToConsider, status # return betas def getInitialGuessExpanding(self, listofPreviousBetas, modesToConsider,", "to a copy of the unbound structure and get the reduced RMSD ref_chain_copy", "The reduction list of obtained RMSD values \"\"\" RMSDReductions = [] overlap =", "return RMSDReductions, overlap, stepPointsReduction def calcRMSDReductionsReverseGeneral(self, Marray, ref_chain, mob_chain, defvec, referenceName, filePrefix): \"\"\"", "modes defvec: the deformationvector MTMfull: dot product of the full ANM matrix inverse", "np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) L_RMSReductions = np.array(L_RMSReductions, dtype=np.float64) deformationSnapshots[\"proteinTo\"] = ref_chain.copy()", "calcRMSD(proteinFrom_copy, proteinTo) # RMSD comparison if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox): print \"RMSD_after_Tapprox has a", "initialGuess = np.append(initialGuess, [x*0.0 for x in 
range(len(initialGuess), modesToConsider)]) return initialGuess def calcRMSDReductionsAidedByCollectivity(self,", "maxModes, preconceived=False): \"\"\" Obtain betas by a scipy optimizer fitting, the formula is", "len(k)): M = np.dstack((M, np.dstack(arrCopy)[0][i])) #print \"M in keep first \"+str(i)+\": \", M", "proteinTo referenceName: the name of the reference, for output debugging if the RMSD", "referenceName: the name of the reference Returns: RMSDReductions: The reduction list of obtained", "modes specified by the indices in excludeFirstK, and the following modes as given", "mob_chain_copy = mob_chain.copy() mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) L_RMSD_after_Tapprox = self.getL_RMS(mob_chain_copy,", "the matched atoms ref_chain: The overall matched chain atoms from the unbound structure", "for output debugging purposes filePrefix: file prefix, for output debugging purposes Returns: RMSDReductions:", "indices in indicesOfHighest\"\"\" excludeFirstK = range(0, excludeFirstK) M = anm_slc[excludeFirstK[0]].getArray() #print \"initial M:", "np.arrays with the modes specified by the indices in excludeFirstK, and the following", "try: betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole, anm_slc, preconceived=True) except TimeoutError: print", "one to have the index match the range 0 to n-1 print stepPointsReduction", "except TimeoutError: print \"RMSD timeout at modes\", i, \"using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\",", "return RMSD of individual proteins instead return calcRMSD(proteinFrom, proteinTo) def calcRMSDReductionFromTo(self, Marray, proteinFrom,", "modes, same shape as getArray from an ANM object ref_chain: The overall matched", "of normal modes, same shape as getArray from an ANM object ref_chain: The", "#print \"shape(Tdefvec): \", np.shape(Tdefvec) # 
#print \"shape(M): \", np.shape(M) # if len(M) !=", "not Returns: the beta coefficents \"\"\" M = anm Tdefvec = defvec.getArray() #print", "return RMSD_after_Tapprox, currentOverlap, betas @timeout() def obtainLstSqBetas(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, anmTuple,", "#print \"M: \", M for i in range(0, len(Mbefore)): M[i] = Mbefore[i] return", "= self.getInitialGuess(listofPreviousBetas, modesToConsider) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] #", "timeouts Returns: RMSDReduction, overlap, betas \"\"\" Mtrans = Marray.T MTM = np.dot(Mtrans, Marray)", "\", M for j in range(0, len(indicesOfHighest)): M = np.dstack((M, anm_slc[indicesOfHighest[j]].getArray())) # print", "else: betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] print \"modesToConsider, status: \",", "print \"RMSD timeout at modes\", i,\"using previous betas\" # with open(\"RMSDtimeoutMAX\"+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as", "timeout import TimeoutError from collections import OrderedDict class RMSDReducer(object): ''' The RMSDReducer contains", "= proteinFrom.select('segment \\\"L.\\\"') proteinToL = proteinTo.select('segment \\\"L.\\\"') return calcRMSD(proteinFromL, proteinToL) else: # else", "modes\", i,\"using previous betas\" with open(\"RMSDtimeout\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout at", "L_RMS does not apply, # return RMSD of individual proteins instead return calcRMSD(proteinFrom,", "MTM) except TimeoutError: print \"RMSD timeout at modes\", Marray.shape[1],\" using previous betas\" with", "\\\"L.\\\"') proteinToL = proteinTo.select('segment \\\"L.\\\"') return calcRMSD(proteinFromL, proteinToL) else: # else it is", "at modes\", i, \"using previous betas\" with 
open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD", "proteinTo) # calc overlap currentOverlap = calcOverlap(TapproxVector, defvec) if np.isnan(currentOverlap) or np.isinf(currentOverlap): print", "with betas. RMSD change from mob_chain to ref_chain Args: Marray: Array of normal", "calculated on this structure # nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) - 6 # initialGuess =", "overlap, stepPointsReduction def getModeArrayBasedOnIndices(self, anm_slc, excludeFirstK, indicesOfHighest): \"\"\" Create an array of np.arrays", "betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] else: if previousBetas is not None:", "overlap.append(overlap[-1]) else: # else it is the first RMSD reduction run, store betas", "= self.expandInitialGuess(previousBetas, M.shape[1]) betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2] else:", "proteinFrom: The overall matched chains of the protein to deform towards proteinTo proteinTo:", "anm): \"\"\" Calculate and return the dot product of all ANM modes transposed", "reduction should go preconceived # calculate betas try: betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM,", "mob_chain, defvec, stepPointsReduction, referenceName, filePrefix): \"\"\" Calculate a list of RMSD reductions based", "guard from config been reached or not Returns: the beta coefficents \"\"\" M", "\"Complex\": proteinFromL = proteinFrom.select('segment \\\"L.\\\"') proteinToL = proteinTo.select('segment \\\"L.\\\"') return calcRMSD(proteinFromL, proteinToL) else:", "= calcRMSD(proteinFrom_copy, proteinTo) # RMSD comparison if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox): print \"RMSD_after_Tapprox has", "ANM with modes # defvec: the deformationvector # MTMfull: dot product of the", "from the unbound structure mob_chain: The overall 
matched chain atoms from the bound", "for x in range(len(initialGuess), modesToConsider)]) return initialGuess def calcRMSDReductionsAidedByCollectivity(self, collectivity, highestN, excludeFirstK, anm_slc,", "\"first M original: \", M Tdefvec = defvec.getArray() #print \"shape(Tdefvec): \", np.shape(Tdefvec) #print", "if len(M) != len(Tdefvec): raise ValueError(\"Cannot calculate betas, len(M) != len(Tdefvec)\") Mtrans =", "the beta calculation was not successful L_RMSReductions.append(L_RMSReductions[-1]) else: # else it is the", "of the protein which is being deformed towards previousBetas: The previous betas, serves", "modesToConsider, listofPreviousBetas, anmTuple, preconceived=False): \"\"\" Obtain betas by a scipy optimizer fitting, the", "mob_chain to ref_chain Args: anm_slc: The sliced ANM, with the corresponding entries of", "be calculated on this structure nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) - 6 initialGuess = self.getInitialGuess(listofPreviousBetas,", "self.utils.config.maxIterBetas if modesToConsider < 1: #print \"original MTM, np.dot(Mtrans, Tdefvec) \", MTM, np.dot(Mtrans,", "betas try: betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes, preconceived=True) except TimeoutError:", "ANM modes transposed times all ANM modes.\"\"\" M = anm.getArray() Mtrans = M.T", "1: # print \"using one column\" # betas, status = cg(MTM, np.dot(Mtrans, Tdefvec),", "overlap.append(currentOverlap) else: # else the previous RMSD was actually lower, the beta calculation", "= OrderedDict() deformationSnapshots[\"proteinFrom\"] = mob_chain.copy() for i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if i", "protein (target of the deformation vector) investigationsON: \"Complex\" or \"Individual\" Returns: L_RMS of", "betas, len(M) != len(Tdefvec)\") # Mtrans = M.T # MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] #", "The reduction list of obtained RMSD values \"\"\" print 
\"anm_slc[0].getArray(): \", anm_slc[0][0:2].getArray().shape RMSDReductions", "# print \"modesToConsider, status: \", modesToConsider, status # return betas def getInitialGuessExpanding(self, listofPreviousBetas,", "Marray, ref_chain, mob_chain, defvec, referenceName, filePrefix): \"\"\" Calculate a list of RMSD reductions", "= M.T # the default maxiter is too low, increase the number maximalIter", "betasListWhole, anm_slc, preconceived=True) Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply", "the beta coefficents \"\"\" Tdefvec = defvec.getArray() if len(M) != len(Tdefvec): print \"len(M):", "# RMSDReductions.append(RMSDReductions[-1]) # overlap.append(overlap[-1]) # print \"already reached RMSD = 1 at i:\",", "defvec, MTM, i, betasListWhole, anm_slc, preconceived=True) except TimeoutError: print \"RMSD timeout at modes\",", "RMSD timeout at modes \" +str(Marray.shape[1])+\" using previous betas\\n \") betas = self.getInitialGuess(previousBetas,", "at modes\", i,\"using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\", \"a\") as myfile: myfile.write(referenceName+\" RMSD timeout", "betas Returns: The initial guess vector for the betas, padded with 0.0 to", "being deformed towards defvec: the deformation vector from proteinFrom to proteinTo referenceName: the", "guess for the fitter previousOverlap: The previous overlap previousRMSD: The previous reduced RMSD", "else it is the first RMSD reduction run, store betas and RMSD reduction", "deformed towards previousBetas: The previous betas, serves as part of the initial guess", "overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap) guard = 0 else: print \"previous RMSD", "get the reduced RMSD ref_chain_copy = ref_chain.copy() ref_chain_copy.setCoords(ref_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(ref_chain_copy,", "continue # 
calculate betas try: betas = self.obtainLstSqBetasGeneralizedExpanding(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes)", "if i > self.utils.config.stopRMSDReductionAt: # temporary, to speedup other calculations continue if guard", "@timeout() def obtainLstSqBetasGeneralizedExpanding(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, maxModes, preconceived=False): \"\"\" Obtain betas", "is given in : Moal, <NAME>., and <NAME>. \"SwarmDock and the Use of", "reach the correct length \"\"\" initialGuess = listofPreviousBetas initialGuess = np.append(initialGuess, [x*0.0 for", "- 6 initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes) if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined): if np.linalg.det(MTM)", "the ANM matrix # modesToConsider: up to how many modes the betas should", "= [[0] * stepPointsReduction[0]] deformationSnapshots = OrderedDict() deformationSnapshots[\"proteinFrom\"] = mob_chain.copy() for i in", "dtype=np.float64) deformationSnapshots[\"proteinTo\"] = ref_chain.copy() return RMSDReductions, overlap, stepPointsReduction, L_RMSReductions, deformationSnapshots def getL_RMS(self, proteinFrom,", "the mode vectors are problematic, returning original RMSD\" RMSD_after_Tapprox = calcRMSD(proteinFrom, proteinTo) #", "np.shape(Tdefvec) #print \"shape(M): \", np.shape(M) if len(M) != len(Tdefvec): print \"len(M): \", M.shape", "RMSD_after_Tapprox, currentOverlap, betas @timeout() def obtainLstSqBetas(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, anmTuple, preconceived=False):", "previousOverlap, previousRMSD, referenceName, filePrefix): \"\"\" Calculate a list of RMSD reductions based increasing", "previousOverlap: currentOverlap = previousOverlap else: currentOverlap = 0 return RMSD_after_Tapprox, currentOverlap, betas def", "> (nonTrivialModes+self.utils.config.goOverdetermined): # if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) == -0.0: # 
print", "not successful betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else: # else it is the first RMSD", "at modes \" +str(i)+\" using previous betas\\n \") betas = self.getInitialGuess(betasListWhole, i) Tapprox", "= self.getInitialGuess(previousBetas, Marray.shape[1]) Tapprox = np.dot(betas, Marray.T) TapproxVector = Vector(Tapprox, \"Tapprox\") # apply", "of the eigenvectors towards the matched atoms ref_chain: The overall matched chain atoms", "at modes \" +str(Marray.shape[1])+\" using previous betas\\n \") betas = self.getInitialGuess(previousBetas, Marray.shape[1]) Tapprox", "or np.isinf(currentOverlap): print \"overlap has a numerical problem, maybe the two structures are", "M: \", M for i in range(1, len(excludeFirstK)): M = np.dstack((M, anm_slc[excludeFirstK[i]].getArray())) #", "Calculate and return the dot product of all ANM modes transposed times all", "Vector(Tapprox, \"Tapprox\") # apply Tapprox to a copy of the unbound structure and", "overlap: overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap) else: print \"previous RMSD lower at", "print \"modesToConsider, status: \", M.shape[1], status return betas # def obtainLstSqBetasByCollectivity(self, M, defvec,", "obtainLstSqBetasByCollectivity(self, M, defvec, MTMfull, modesToConsider, listofPreviousBetas, anmTuple, preconceived=False): # \"\"\" Obtain betas by", "comparison if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox): print \"RMSD_after_Tapprox has a numerical problem, maybe the", "and return the dot product of all ANM modes transposed times all ANM", "match the range 0 to n-1 print stepPointsReduction betasListWhole = [[0] * stepPointsReduction[0]]", "anm_slc[0].numModes()) guard = 0 for i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if i >", "\", M for i in range(1, len(k)): M = np.dstack((M, np.dstack(arrCopy)[0][i])) #print \"M", "betas, padded with 0.0 to 
reach the correct length \"\"\" initialGuess = listofPreviousBetas[-1]", "L_RMSD reduction run, store L_RMS reduction results initial_L_RMS = self.getL_RMS(mob_chain, ref_chain, self.utils.config.investigationsOn) if", "M Tdefvec = defvec.getArray() #print \"shape(Tdefvec): \", np.shape(Tdefvec) #print \"shape(M): \", np.shape(M) if", "initialGuess # print \"modesToConsider, status: \", modesToConsider, status # return betas def getInitialGuessExpanding(self,", "being deformed towards previousBetas: The previous betas, serves as part of the initial", "# calculate betas try: betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes, preconceived=True)", "= calcRMSD(ref_chain_copy, mob_chain) if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]): # store betas and RMSD reduction results", "Tdefvec), maxiter=maximalIter)[0:2] print \"modesToConsider, status: \", modesToConsider, status elif not preconceived: initialGuess =", "optimizer fitting, the formula is given in : # # Moal, <NAME>., and", "\"RMSD_after_Tapprox has a numerical problem, maybe the two structures are already too close", "numModes) except TimeoutError: print \"RMSD timeout at modes\", i,\"using previous betas\" with open(\"RMSDtimeoutgeneral\"+filePrefix+self.utils.config.whatAtomsToMatch+\".txt\",", "length \"\"\" initialGuess = listofPreviousBetas[-1] initialGuess = np.append(initialGuess, [x*0.0 for x in range(len(initialGuess),", "structures are already too close or the mode vectors are problematic, returning original", "Modes in Protein-Protein Docking.\" International Journal of # Molecular Sciences 11, no. 
10", "lower at \", i # else the previous RMSD was actually lower, the", "\"overlap has a numerical problem\" if overlap: overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap)", "numerical problem\" if overlap: overlap.append(overlap[-1]) else: currentOverlap = 0 overlap.append(currentOverlap) guard = 0", "modes the betas should be calculated, starting from 0 to n-1 listofPreviousBetas: the", "self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole, anm_slc) except TimeoutError: print \"RMSD timeout at modes\",", "# else it is the first RMSD reduction run, no need to compare", "first RMSD reduction run, no need to compare against previous RMSD # store", "the range 0 to n-1 print stepPointsReduction betasListWhole = [[0] * stepPointsReduction[0]] deformationSnapshots", "cast objects overlap = np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) L_RMSReductions = np.array(L_RMSReductions,", "else the previous RMSD was actually lower, the beta calculation was not successful", "proteinFrom, proteinTo, defvec, previousBetas, previousOverlap, previousRMSD, referenceName, filePrefix): \"\"\" Calculate a list of", "= self.getModeArrayKeepingFirstK(M, i) betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i, betasListWhole, anm_slc, preconceived=True) Tapprox", "M return M[0] def getModeArrayKeepingFirstK(self, arr, k): k += 1 k = range(0,", "values \"\"\" print \"anm_slc[0].getArray(): \", anm_slc[0][0:2].getArray().shape RMSDReductions = [] overlap = [] MTM", "np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] # elif not preconceived: # initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) #", "structure # nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) - 6 # initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider) #", "len(Tdefvec)\") Mtrans = M.T # the default maxiter is too low, increase the", "range(1, len(k)): M = 
np.dstack((M, np.dstack(arrCopy)[0][i])) #print \"M in keep first \"+str(i)+\": \",", "the bound structure and get the reduced RMSD mob_chain_copy = mob_chain.copy() mob_chain_copy.setCoords(mob_chain_copy.getCoords() +", "a numerical problem\" currentOverlap = 0 overlap.append(currentOverlap) else: # else guard is >=", "beta calculation was not successful betasListWhole.append(betasListWhole[-1]) RMSDReductions.append(RMSDReductions[-1]) overlap.append(overlap[-1]) else: # else it is", "the ANM matrix modesToConsider: up to how many modes the betas should be", "\"\"\" M = anm Tdefvec = defvec.getArray() if len(M) != len(Tdefvec): print \"len(M):", "!= 0: # print \"modesToConsider, nonTrivialModes, status: \", modesToConsider, nonTrivialModes, \"status == \",", "# else it is the first L_RMSD reduction run, store L_RMS reduction results", "0 for i in stepPointsReduction: if self.utils.config.stopRMSDReductionAt: if i > self.utils.config.stopRMSDReductionAt: # temporary,", "indicesOfHighest) defvec = calcDeformVector(ref_chain, mob_chain) RMSDReductions = [] overlap = [] Mtrans =", "import calcRMSD from scipy.sparse.linalg import cg from timeout import timeout from timeout import", "> (nonTrivialModes+self.utils.config.goOverdetermined): if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) == -0.0: print \"modesToConsider, nonTrivialModes,", "if modesToConsider < 1: # print \"using one column\" # betas, status =", "proteinTo.select('segment \\\"L.\\\"') return calcRMSD(proteinFromL, proteinToL) else: # else it is an investigation on", "overlap = np.array(overlap, dtype=np.float64) RMSDReductions = np.array(RMSDReductions, dtype=np.float64) return RMSDReductions, overlap, stepPointsReduction def", "Marray, proteinFrom, proteinTo, defvec, previousBetas, previousOverlap, previousRMSD, referenceName, filePrefix): \"\"\" Calculate a list", "np.dstack(arrCopy)[0][0] #print \"first M in keep first k: \", M for i in", "ref_chain.copy() 
ref_chain_copy.setCoords(ref_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain) if RMSDReductions: if RMSD_after_Tapprox <", "purposes Returns: RMSDReductions: The reduction list of obtained RMSD values \"\"\" RMSDReductions =", "RMSD mob_chain_copy = mob_chain.copy() mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3()) RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain) if RMSDReductions:", "every value by one to have the index match the range 0 to", "M = self.getModeArrayBasedOnIndices(anm_slc[0], excludeFirstK, indicesOfHighest) defvec = calcDeformVector(ref_chain, mob_chain) RMSDReductions = [] overlap", "chains of the protein which is being deformed towards defvec: the deformation vector", "all ANM modes transposed times all ANM modes.\"\"\" M = anm.getArray() Mtrans =", "else: RMSDReductions.append(initial_RMSD) print \"first mode did not lower RMSD\" betasListWhole.append(betas) # calc overlap", "= 0 else: previousOverlap = previousOverlap[-1] if len(previousRMSD) == 0: previousRMSD = calcRMSD(proteinFrom,", "#print \"shape(M): \", np.shape(M) if len(M) != len(Tdefvec): print \"len(M): \", M.shape print", "status: \", modesToConsider, status return betas @timeout() def obtainLstSqBetasGeneral(self, anm, defvec, MTMfull, modesToConsider,", "defvec, MTMfull, modesToConsider, listofPreviousBetas, maxModes, preconceived=False): \"\"\" Obtain betas by a scipy optimizer", "= cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2] # elif not preconceived: # initialGuess = self.getInitialGuess(listofPreviousBetas," ]
[ "= '002fd410a290' down_revision = '<KEY>' branch_labels = None depends_on = None def upgrade():", "op.alter_column('owner', 'identity', existing_type=sa.Integer(), type_=sa.String()) op.alter_column('title', 'owner_identity', existing_type=sa.Integer(), type_=sa.String()) op.create_primary_key('owner_pkey', 'owner', ['identity']) def downgrade():", "existing_type=sa.Integer(), type_=sa.String()) op.alter_column('title', 'owner_identity', existing_type=sa.Integer(), type_=sa.String()) op.create_primary_key('owner_pkey', 'owner', ['identity']) def downgrade(): op.execute('ALTER TABLE", "= '<KEY>' branch_labels = None depends_on = None def upgrade(): op.execute('ALTER TABLE \"owner\"", "as sa # revision identifiers, used by Alembic. revision = '002fd410a290' down_revision =", "CONSTRAINT \"owner_pkey\" CASCADE') op.alter_column('owner', 'identity', existing_type=sa.String(), type_=sa.Integer(), postgresql_using=\"identity::integer\", autoincrement=True) op.alter_column('title', 'owner_identity', existing_type=sa.String(), type_=sa.Integer(),", "'identity', existing_type=sa.String(), type_=sa.Integer(), postgresql_using=\"identity::integer\", autoincrement=True) op.alter_column('title', 'owner_identity', existing_type=sa.String(), type_=sa.Integer(), postgresql_using=\"identity::integer\", autoincrement=True) op.create_primary_key('owner_pkey', 'owner',", "import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision", "\"owner_pkey\" CASCADE') op.alter_column('owner', 'identity', existing_type=sa.String(), type_=sa.Integer(), postgresql_using=\"identity::integer\", autoincrement=True) op.alter_column('title', 'owner_identity', existing_type=sa.String(), type_=sa.Integer(), postgresql_using=\"identity::integer\",", "TABLE \"owner\" DROP CONSTRAINT \"owner_pkey\" CASCADE') op.alter_column('owner', 'identity', existing_type=sa.String(), type_=sa.Integer(), postgresql_using=\"identity::integer\", autoincrement=True) op.alter_column('title',", "Create Date: 2019-02-05 13:40:59.112652 \"\"\" from alembic import op import sqlalchemy as sa", "down_revision = '<KEY>' branch_labels = None depends_on = None def upgrade(): op.execute('ALTER TABLE", "type_=sa.String()) op.alter_column('title', 'owner_identity', existing_type=sa.Integer(), type_=sa.String()) op.create_primary_key('owner_pkey', 'owner', ['identity']) def downgrade(): op.execute('ALTER TABLE \"owner\"", "def downgrade(): op.execute('ALTER TABLE \"owner\" DROP CONSTRAINT \"owner_pkey\" CASCADE') op.alter_column('owner', 'identity', existing_type=sa.String(), type_=sa.Integer(),", "\"owner\" DROP CONSTRAINT \"owner_pkey\" CASCADE') op.alter_column('owner', 'identity', existing_type=sa.Integer(), type_=sa.String()) op.alter_column('title', 'owner_identity', existing_type=sa.Integer(), type_=sa.String())", "type_=sa.String()) op.create_primary_key('owner_pkey', 'owner', ['identity']) def downgrade(): op.execute('ALTER TABLE \"owner\" DROP CONSTRAINT \"owner_pkey\" CASCADE')", "revision identifiers, used by Alembic. 
revision = '002fd410a290' down_revision = '<KEY>' branch_labels =", "= None def upgrade(): op.execute('ALTER TABLE \"owner\" DROP CONSTRAINT \"owner_pkey\" CASCADE') op.alter_column('owner', 'identity',", "None depends_on = None def upgrade(): op.execute('ALTER TABLE \"owner\" DROP CONSTRAINT \"owner_pkey\" CASCADE')", "alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic.", "import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '002fd410a290'", "2019-02-05 13:40:59.112652 \"\"\" from alembic import op import sqlalchemy as sa # revision", "sa # revision identifiers, used by Alembic. revision = '002fd410a290' down_revision = '<KEY>'", "# revision identifiers, used by Alembic. revision = '002fd410a290' down_revision = '<KEY>' branch_labels", "DROP CONSTRAINT \"owner_pkey\" CASCADE') op.alter_column('owner', 'identity', existing_type=sa.String(), type_=sa.Integer(), postgresql_using=\"identity::integer\", autoincrement=True) op.alter_column('title', 'owner_identity', existing_type=sa.String(),", "'owner_identity', existing_type=sa.Integer(), type_=sa.String()) op.create_primary_key('owner_pkey', 'owner', ['identity']) def downgrade(): op.execute('ALTER TABLE \"owner\" DROP CONSTRAINT", "13:40:59.112652 \"\"\" from alembic import op import sqlalchemy as sa # revision identifiers,", "upgrade(): op.execute('ALTER TABLE \"owner\" DROP CONSTRAINT \"owner_pkey\" CASCADE') op.alter_column('owner', 'identity', existing_type=sa.Integer(), type_=sa.String()) op.alter_column('title',", "downgrade(): op.execute('ALTER TABLE \"owner\" DROP CONSTRAINT \"owner_pkey\" CASCADE') op.alter_column('owner', 'identity', existing_type=sa.String(), type_=sa.Integer(), postgresql_using=\"identity::integer\",", "'<KEY>' branch_labels = None depends_on = None def upgrade(): op.execute('ALTER TABLE \"owner\" DROP", "op.alter_column('owner', 'identity', existing_type=sa.String(), type_=sa.Integer(), postgresql_using=\"identity::integer\", 
autoincrement=True) op.alter_column('title', 'owner_identity', existing_type=sa.String(), type_=sa.Integer(), postgresql_using=\"identity::integer\", autoincrement=True) op.create_primary_key('owner_pkey',", "from alembic import op import sqlalchemy as sa # revision identifiers, used by", "= None depends_on = None def upgrade(): op.execute('ALTER TABLE \"owner\" DROP CONSTRAINT \"owner_pkey\"", "existing_type=sa.String(), type_=sa.Integer(), postgresql_using=\"identity::integer\", autoincrement=True) op.alter_column('title', 'owner_identity', existing_type=sa.String(), type_=sa.Integer(), postgresql_using=\"identity::integer\", autoincrement=True) op.create_primary_key('owner_pkey', 'owner', ['identity'])", "op.execute('ALTER TABLE \"owner\" DROP CONSTRAINT \"owner_pkey\" CASCADE') op.alter_column('owner', 'identity', existing_type=sa.Integer(), type_=sa.String()) op.alter_column('title', 'owner_identity',", "sqlalchemy as sa # revision identifiers, used by Alembic. revision = '002fd410a290' down_revision", "used by Alembic. revision = '002fd410a290' down_revision = '<KEY>' branch_labels = None depends_on", "ID: 002fd410a290 Revises: <KEY> Create Date: 2019-02-05 13:40:59.112652 \"\"\" from alembic import op", "def upgrade(): op.execute('ALTER TABLE \"owner\" DROP CONSTRAINT \"owner_pkey\" CASCADE') op.alter_column('owner', 'identity', existing_type=sa.Integer(), type_=sa.String())", "revision = '002fd410a290' down_revision = '<KEY>' branch_labels = None depends_on = None def", "identifiers, used by Alembic. 
revision = '002fd410a290' down_revision = '<KEY>' branch_labels = None", "Revision ID: 002fd410a290 Revises: <KEY> Create Date: 2019-02-05 13:40:59.112652 \"\"\" from alembic import", "\"owner\" DROP CONSTRAINT \"owner_pkey\" CASCADE') op.alter_column('owner', 'identity', existing_type=sa.String(), type_=sa.Integer(), postgresql_using=\"identity::integer\", autoincrement=True) op.alter_column('title', 'owner_identity',", "Revises: <KEY> Create Date: 2019-02-05 13:40:59.112652 \"\"\" from alembic import op import sqlalchemy", "depends_on = None def upgrade(): op.execute('ALTER TABLE \"owner\" DROP CONSTRAINT \"owner_pkey\" CASCADE') op.alter_column('owner',", "\"\"\" from alembic import op import sqlalchemy as sa # revision identifiers, used", "op import sqlalchemy as sa # revision identifiers, used by Alembic. revision =", "<reponame>LandRegistry/digital-street-title-api<filename>migrations/versions/002fd410a290_002_owner_id_type.py<gh_stars>0 \"\"\"empty message Revision ID: 002fd410a290 Revises: <KEY> Create Date: 2019-02-05 13:40:59.112652 \"\"\"", "\"\"\"empty message Revision ID: 002fd410a290 Revises: <KEY> Create Date: 2019-02-05 13:40:59.112652 \"\"\" from", "Alembic. 
revision = '002fd410a290' down_revision = '<KEY>' branch_labels = None depends_on = None", "CONSTRAINT \"owner_pkey\" CASCADE') op.alter_column('owner', 'identity', existing_type=sa.Integer(), type_=sa.String()) op.alter_column('title', 'owner_identity', existing_type=sa.Integer(), type_=sa.String()) op.create_primary_key('owner_pkey', 'owner',", "op.alter_column('title', 'owner_identity', existing_type=sa.Integer(), type_=sa.String()) op.create_primary_key('owner_pkey', 'owner', ['identity']) def downgrade(): op.execute('ALTER TABLE \"owner\" DROP", "'owner', ['identity']) def downgrade(): op.execute('ALTER TABLE \"owner\" DROP CONSTRAINT \"owner_pkey\" CASCADE') op.alter_column('owner', 'identity',", "op.create_primary_key('owner_pkey', 'owner', ['identity']) def downgrade(): op.execute('ALTER TABLE \"owner\" DROP CONSTRAINT \"owner_pkey\" CASCADE') op.alter_column('owner',", "None def upgrade(): op.execute('ALTER TABLE \"owner\" DROP CONSTRAINT \"owner_pkey\" CASCADE') op.alter_column('owner', 'identity', existing_type=sa.Integer(),", "\"owner_pkey\" CASCADE') op.alter_column('owner', 'identity', existing_type=sa.Integer(), type_=sa.String()) op.alter_column('title', 'owner_identity', existing_type=sa.Integer(), type_=sa.String()) op.create_primary_key('owner_pkey', 'owner', ['identity'])", "['identity']) def downgrade(): op.execute('ALTER TABLE \"owner\" DROP CONSTRAINT \"owner_pkey\" CASCADE') op.alter_column('owner', 'identity', existing_type=sa.String(),", "message Revision ID: 002fd410a290 Revises: <KEY> Create Date: 2019-02-05 13:40:59.112652 \"\"\" from alembic", "DROP CONSTRAINT \"owner_pkey\" CASCADE') op.alter_column('owner', 'identity', existing_type=sa.Integer(), type_=sa.String()) op.alter_column('title', 'owner_identity', existing_type=sa.Integer(), type_=sa.String()) op.create_primary_key('owner_pkey',", "existing_type=sa.Integer(), type_=sa.String()) op.create_primary_key('owner_pkey', 'owner', ['identity']) def downgrade(): op.execute('ALTER 
TABLE \"owner\" DROP CONSTRAINT \"owner_pkey\"", "Date: 2019-02-05 13:40:59.112652 \"\"\" from alembic import op import sqlalchemy as sa #", "CASCADE') op.alter_column('owner', 'identity', existing_type=sa.Integer(), type_=sa.String()) op.alter_column('title', 'owner_identity', existing_type=sa.Integer(), type_=sa.String()) op.create_primary_key('owner_pkey', 'owner', ['identity']) def", "'identity', existing_type=sa.Integer(), type_=sa.String()) op.alter_column('title', 'owner_identity', existing_type=sa.Integer(), type_=sa.String()) op.create_primary_key('owner_pkey', 'owner', ['identity']) def downgrade(): op.execute('ALTER", "by Alembic. revision = '002fd410a290' down_revision = '<KEY>' branch_labels = None depends_on =", "002fd410a290 Revises: <KEY> Create Date: 2019-02-05 13:40:59.112652 \"\"\" from alembic import op import", "TABLE \"owner\" DROP CONSTRAINT \"owner_pkey\" CASCADE') op.alter_column('owner', 'identity', existing_type=sa.Integer(), type_=sa.String()) op.alter_column('title', 'owner_identity', existing_type=sa.Integer(),", "<KEY> Create Date: 2019-02-05 13:40:59.112652 \"\"\" from alembic import op import sqlalchemy as", "op.execute('ALTER TABLE \"owner\" DROP CONSTRAINT \"owner_pkey\" CASCADE') op.alter_column('owner', 'identity', existing_type=sa.String(), type_=sa.Integer(), postgresql_using=\"identity::integer\", autoincrement=True)", "CASCADE') op.alter_column('owner', 'identity', existing_type=sa.String(), type_=sa.Integer(), postgresql_using=\"identity::integer\", autoincrement=True) op.alter_column('title', 'owner_identity', existing_type=sa.String(), type_=sa.Integer(), postgresql_using=\"identity::integer\", autoincrement=True)", "'002fd410a290' down_revision = '<KEY>' branch_labels = None depends_on = None def upgrade(): op.execute('ALTER", "branch_labels = None depends_on = None def upgrade(): op.execute('ALTER TABLE \"owner\" DROP CONSTRAINT" ]
[ "= license self._update_settings() def _update_settings(self): \"\"\"update :attr:`maker.settings` dictionary\"\"\" info = { 'today': datetime.date.today().isoformat(),", "in _LICENSES.items(): print('{0}{1}: {2}'.format(indent, k, v[1])) def generate(self): \"\"\"Worker method of :class:`LicenseMaker` Returns:", "from .base import BaseMaker #: Supported licenses, corresponding template file names, and descriptions", "GNU Lesser General Public License v2.1 * LGPL3: GNU Lesser General Public License", "if successful, False otherwise \"\"\" licFile = os.path.join(self.projectDir, 'LICENSE') ret = self.write_file(_LICENSES[self.license][0], licFile)", "for public domain\"], \"GPL2\": [\"license_gpl_2.0\", \"GNU General Public License v2.0\"], \"GPL3\": [\"license_gpl_3.0\", \"GNU", "domain * GPL2: GNU General Public License v2.0 * GPL3: GNU General Public", "user did not specify a license in setup.cfg-- it creates the default license,", "to create. Attributes: default_license (str): default license(class variable) \"\"\" default_license = 'MIT' def", "SIMPLE-BSD: Simplified BSD License * PROPRIETARY: Proprietary License Args: license (str): license name", "directory to create force (bool): option for overwriting if the file exists. license", "given is supported, False otherwise \"\"\" return bool(_LICENSES.get(license.upper())) @staticmethod def print_licenses(): \"\"\"print supported", "licenses are:: * APACHE: Apace License * CC0: Creative Commons License for public", "* SIMPLE-BSD: Simplified BSD License * PROPRIETARY: Proprietary License Args: license (str): license", "\"GNU Lesser General Public License v2.1\"], \"LGPL3\": [\"license_lgpl_3.0\", \"GNU Lesser General Public License", "overwriting if the file exists. license (str): license to create. Attributes: default_license (str):", "for overwriting if the file exists. license (str): license to create. 
Attributes: default_license", "\"MOZILLA\": [\"license_mozilla\", \"Mozilla Public License v2.0\"], \"NEW-BSD\": [\"license_new_bsd\", \"New BSD(Berkeley Software Distribution) License\"],", "file exists. license (str): license to create. Attributes: default_license (str): default license(class variable)", "ret = self.write_file(_LICENSES[self.license][0], licFile) if not ret: self.logger.info( \"* You can change the", "not specify a license in setup.cfg-- it creates the default license, which is", "v2.0\"], \"GPL3\": [\"license_gpl_3.0\", \"GNU General Public License v3.0\"], \"LGPL2\": [\"license_lgpl_2.1\", \"GNU Lesser General", "\"PROPRIETARY\": [\"license_proprietary\", \"Proprietary License\"], } class LicenseMaker(BaseMaker): \"\"\"*Maker* class to create ``LICENSE`` file", "def _update_settings(self): \"\"\"update :attr:`maker.settings` dictionary\"\"\" info = { 'today': datetime.date.today().isoformat(), 'year': str(datetime.date.today().year), }", "in setup.cfg file. But if it can not retrieve a license from the", "\"GPL2\": [\"license_gpl_2.0\", \"GNU General Public License v2.0\"], \"GPL3\": [\"license_gpl_3.0\", \"GNU General Public License", "License v3.0 * MIT: MIT License, **Default** * MOZILLA: Mozilla Public License v2.0", "Lesser General Public License v2.1\"], \"LGPL3\": [\"license_lgpl_3.0\", \"GNU Lesser General Public License v3.0\"],", "template file names, and descriptions _LICENSES = { \"APACHE\": [\"license_apache\", \"Apache License\"], \"CC0\":", "\"APACHE\": [\"license_apache\", \"Apache License\"], \"CC0\": [\"license_cc0_1.0\", \"Creative Commons License for public domain\"], \"GPL2\":", "file--for example, when the user did not specify a license in setup.cfg-- it", "projectDir self.force = force self.license = license self._update_settings() def _update_settings(self): \"\"\"update :attr:`maker.settings` dictionary\"\"\"", "the license given is supported by *skelpy* or not license name is case-insensitive.", "\"Proprietary 
License\"], } class LicenseMaker(BaseMaker): \"\"\"*Maker* class to create ``LICENSE`` file in the", "case-insensitive. .. Note:: Currently supported licenses are:: * APACHE: Apace License * CC0:", "False otherwise \"\"\" licFile = os.path.join(self.projectDir, 'LICENSE') ret = self.write_file(_LICENSES[self.license][0], licFile) if not", "Note:: Currently supported licenses are:: * APACHE: Apace License * CC0: Creative Commons", "\"\"\" return bool(_LICENSES.get(license.upper())) @staticmethod def print_licenses(): \"\"\"print supported licenses Returns: None \"\"\" print('Supported", "setup.cfg-- it creates the default license, which is the `MIT license <https://opensource.org/licenses/MIT>`_. Args:", "\"\"\"This module defines :class:`LicenseMaker` class.\"\"\" from __future__ import absolute_import, print_function import os import", "APACHE: Apace License * CC0: Creative Commons License for public domain * GPL2:", "k, v in _LICENSES.items(): print('{0}{1}: {2}'.format(indent, k, v[1])) def generate(self): \"\"\"Worker method of", "License v2.1\"], \"LGPL3\": [\"license_lgpl_3.0\", \"GNU Lesser General Public License v3.0\"], \"MIT\": [\"license_mit\", \"MIT", "which is the `MIT license <https://opensource.org/licenses/MIT>`_. Args: projectDir (str): absolute path of project", "project directory ``LicenseMaker`` basically choose the license specified in setup.cfg file. But if", "General Public License v3.0\"], \"MIT\": [\"license_mit\", \"MIT License, Default\"], \"MOZILLA\": [\"license_mozilla\", \"Mozilla Public", "or not license name is case-insensitive. .. 
Note:: Currently supported licenses are:: *", "``LICENSE`` file in the project directory ``LicenseMaker`` basically choose the license specified in", "see if the license given is supported by *skelpy* or not license name", "License\"], } class LicenseMaker(BaseMaker): \"\"\"*Maker* class to create ``LICENSE`` file in the project", "True if successful, False otherwise \"\"\" licFile = os.path.join(self.projectDir, 'LICENSE') ret = self.write_file(_LICENSES[self.license][0],", "__init__(self, projectDir, force, license, **kwargs): self.projectDir = projectDir self.force = force self.license =", "(bool): option for overwriting if the file exists. license (str): license to create.", "* LGPL: GNU Lesser General Public License v2.1 * LGPL3: GNU Lesser General", "datetime from . import settings from .base import BaseMaker #: Supported licenses, corresponding", "project directory to create force (bool): option for overwriting if the file exists.", "bool: True if the license given is supported, False otherwise \"\"\" return bool(_LICENSES.get(license.upper()))", "\" \" * 4 for k, v in _LICENSES.items(): print('{0}{1}: {2}'.format(indent, k, v[1]))", "\"\"\"print supported licenses Returns: None \"\"\" print('Supported licenses are as follows:') indent =", "[\"license_apache\", \"Apache License\"], \"CC0\": [\"license_cc0_1.0\", \"Creative Commons License for public domain\"], \"GPL2\": [\"license_gpl_2.0\",", "absolute_import, print_function import os import datetime from . 
import settings from .base import", "of :class:`LicenseMaker` Returns: bool: True if successful, False otherwise \"\"\" licFile = os.path.join(self.projectDir,", "LicenseMaker(BaseMaker): \"\"\"*Maker* class to create ``LICENSE`` file in the project directory ``LicenseMaker`` basically", "'LICENSE') ret = self.write_file(_LICENSES[self.license][0], licFile) if not ret: self.logger.info( \"* You can change", "General Public License v3.0\"], \"LGPL2\": [\"license_lgpl_2.1\", \"GNU Lesser General Public License v2.1\"], \"LGPL3\":", "Software Distribution) License\"], \"SIMPLE-BSD\": [\"license_simplified_bsd\", \"Simplified BSD(Berkeley Software Distribution) License\"], \"PROPRIETARY\": [\"license_proprietary\", \"Proprietary", "License for public domain * GPL2: GNU General Public License v2.0 * GPL3:", "_LICENSES = { \"APACHE\": [\"license_apache\", \"Apache License\"], \"CC0\": [\"license_cc0_1.0\", \"Creative Commons License for", "example, when the user did not specify a license in setup.cfg-- it creates", "can change the license with 'license' sub-command.\\n\" \"For help, see 'skelpy license -h", "names, and descriptions _LICENSES = { \"APACHE\": [\"license_apache\", \"Apache License\"], \"CC0\": [\"license_cc0_1.0\", \"Creative", "it creates the default license, which is the `MIT license <https://opensource.org/licenses/MIT>`_. 
Args: projectDir", "Software Distribution) License * SIMPLE-BSD: Simplified BSD License * PROPRIETARY: Proprietary License Args:", "Distribution) License\"], \"PROPRIETARY\": [\"license_proprietary\", \"Proprietary License\"], } class LicenseMaker(BaseMaker): \"\"\"*Maker* class to create", "with 'license' sub-command.\\n\" \"For help, see 'skelpy license -h or --help'.\") return bool(ret)", "if the license given is supported, False otherwise \"\"\" return bool(_LICENSES.get(license.upper())) @staticmethod def", "supported licenses Returns: None \"\"\" print('Supported licenses are as follows:') indent = \"", "not ret: self.logger.info( \"* You can change the license with 'license' sub-command.\\n\" \"For", "Public License v3.0\"], \"MIT\": [\"license_mit\", \"MIT License, Default\"], \"MOZILLA\": [\"license_mozilla\", \"Mozilla Public License", "file. But if it can not retrieve a license from the file--for example,", "False otherwise \"\"\" return bool(_LICENSES.get(license.upper())) @staticmethod def print_licenses(): \"\"\"print supported licenses Returns: None", "k, v[1])) def generate(self): \"\"\"Worker method of :class:`LicenseMaker` Returns: bool: True if successful,", ".base import BaseMaker #: Supported licenses, corresponding template file names, and descriptions _LICENSES", "bool(_LICENSES.get(license.upper())) @staticmethod def print_licenses(): \"\"\"print supported licenses Returns: None \"\"\" print('Supported licenses are", "given is supported by *skelpy* or not license name is case-insensitive. .. Note::", "`MIT license <https://opensource.org/licenses/MIT>`_. 
Args: projectDir (str): absolute path of project directory to create", "True if the license given is supported, False otherwise \"\"\" return bool(_LICENSES.get(license.upper())) @staticmethod", "for k, v in _LICENSES.items(): print('{0}{1}: {2}'.format(indent, k, v[1])) def generate(self): \"\"\"Worker method", "BaseMaker #: Supported licenses, corresponding template file names, and descriptions _LICENSES = {", "'today': datetime.date.today().isoformat(), 'year': str(datetime.date.today().year), } settings.update(info) @staticmethod def is_supported_license(license): \"\"\"check to see if", "to create force (bool): option for overwriting if the file exists. license (str):", "[\"license_mozilla\", \"Mozilla Public License v2.0\"], \"NEW-BSD\": [\"license_new_bsd\", \"New BSD(Berkeley Software Distribution) License\"], \"SIMPLE-BSD\":", "\" * 4 for k, v in _LICENSES.items(): print('{0}{1}: {2}'.format(indent, k, v[1])) def", "License v3.0 * LGPL: GNU Lesser General Public License v2.1 * LGPL3: GNU", "General Public License v2.0\"], \"GPL3\": [\"license_gpl_3.0\", \"GNU General Public License v3.0\"], \"LGPL2\": [\"license_lgpl_2.1\",", "the license given is supported, False otherwise \"\"\" return bool(_LICENSES.get(license.upper())) @staticmethod def print_licenses():", "file names, and descriptions _LICENSES = { \"APACHE\": [\"license_apache\", \"Apache License\"], \"CC0\": [\"license_cc0_1.0\",", "<https://opensource.org/licenses/MIT>`_. 
Args: projectDir (str): absolute path of project directory to create force (bool):", "follows:') indent = \" \" * 4 for k, v in _LICENSES.items(): print('{0}{1}:", ":class:`LicenseMaker` Returns: bool: True if successful, False otherwise \"\"\" licFile = os.path.join(self.projectDir, 'LICENSE')", "Proprietary License Args: license (str): license name Returns: bool: True if the license", "v3.0 * LGPL: GNU Lesser General Public License v2.1 * LGPL3: GNU Lesser", "licFile) if not ret: self.logger.info( \"* You can change the license with 'license'", "\"Apache License\"], \"CC0\": [\"license_cc0_1.0\", \"Creative Commons License for public domain\"], \"GPL2\": [\"license_gpl_2.0\", \"GNU", "\"Simplified BSD(Berkeley Software Distribution) License\"], \"PROPRIETARY\": [\"license_proprietary\", \"Proprietary License\"], } class LicenseMaker(BaseMaker): \"\"\"*Maker*", "license given is supported by *skelpy* or not license name is case-insensitive. ..", "[\"license_gpl_2.0\", \"GNU General Public License v2.0\"], \"GPL3\": [\"license_gpl_3.0\", \"GNU General Public License v3.0\"],", "descriptions _LICENSES = { \"APACHE\": [\"license_apache\", \"Apache License\"], \"CC0\": [\"license_cc0_1.0\", \"Creative Commons License", "License\"], \"CC0\": [\"license_cc0_1.0\", \"Creative Commons License for public domain\"], \"GPL2\": [\"license_gpl_2.0\", \"GNU General", "setup.cfg file. But if it can not retrieve a license from the file--for", "Returns: bool: True if successful, False otherwise \"\"\" licFile = os.path.join(self.projectDir, 'LICENSE') ret", "create force (bool): option for overwriting if the file exists. 
license (str): license", "GNU General Public License v3.0 * LGPL: GNU Lesser General Public License v2.1", "corresponding template file names, and descriptions _LICENSES = { \"APACHE\": [\"license_apache\", \"Apache License\"],", "BSD(Berkeley Software Distribution) License\"], \"PROPRIETARY\": [\"license_proprietary\", \"Proprietary License\"], } class LicenseMaker(BaseMaker): \"\"\"*Maker* class", "Distribution) License\"], \"SIMPLE-BSD\": [\"license_simplified_bsd\", \"Simplified BSD(Berkeley Software Distribution) License\"], \"PROPRIETARY\": [\"license_proprietary\", \"Proprietary License\"],", "v2.0 * NEW-BSD: New BSD(Berkeley Software Distribution) License * SIMPLE-BSD: Simplified BSD License", "license given is supported, False otherwise \"\"\" return bool(_LICENSES.get(license.upper())) @staticmethod def print_licenses(): \"\"\"print", "coding: utf-8 -*- \"\"\"This module defines :class:`LicenseMaker` class.\"\"\" from __future__ import absolute_import, print_function", "Software Distribution) License\"], \"PROPRIETARY\": [\"license_proprietary\", \"Proprietary License\"], } class LicenseMaker(BaseMaker): \"\"\"*Maker* class to", "(str): default license(class variable) \"\"\" default_license = 'MIT' def __init__(self, projectDir, force, license,", "= \" \" * 4 for k, v in _LICENSES.items(): print('{0}{1}: {2}'.format(indent, k,", "= 'MIT' def __init__(self, projectDir, force, license, **kwargs): self.projectDir = projectDir self.force =", "print('{0}{1}: {2}'.format(indent, k, v[1])) def generate(self): \"\"\"Worker method of :class:`LicenseMaker` Returns: bool: True", "force self.license = license self._update_settings() def _update_settings(self): \"\"\"update :attr:`maker.settings` dictionary\"\"\" info = {", "defines :class:`LicenseMaker` class.\"\"\" from __future__ import absolute_import, print_function import os import datetime from", "print('Supported licenses are as follows:') indent = \" \" * 4 for k,", "name is case-insensitive. .. 
Note:: Currently supported licenses are:: * APACHE: Apace License", "it can not retrieve a license from the file--for example, when the user", "not license name is case-insensitive. .. Note:: Currently supported licenses are:: * APACHE:", "os.path.join(self.projectDir, 'LICENSE') ret = self.write_file(_LICENSES[self.license][0], licFile) if not ret: self.logger.info( \"* You can", "**Default** * MOZILLA: Mozilla Public License v2.0 * NEW-BSD: New BSD(Berkeley Software Distribution)", "#: Supported licenses, corresponding template file names, and descriptions _LICENSES = { \"APACHE\":", "v3.0\"], \"MIT\": [\"license_mit\", \"MIT License, Default\"], \"MOZILLA\": [\"license_mozilla\", \"Mozilla Public License v2.0\"], \"NEW-BSD\":", "Lesser General Public License v2.1 * LGPL3: GNU Lesser General Public License v3.0", "license, which is the `MIT license <https://opensource.org/licenses/MIT>`_. Args: projectDir (str): absolute path of", "License v3.0\"], \"LGPL2\": [\"license_lgpl_2.1\", \"GNU Lesser General Public License v2.1\"], \"LGPL3\": [\"license_lgpl_3.0\", \"GNU", ".. Note:: Currently supported licenses are:: * APACHE: Apace License * CC0: Creative", "licenses, corresponding template file names, and descriptions _LICENSES = { \"APACHE\": [\"license_apache\", \"Apache", "the file exists. license (str): license to create. 
Attributes: default_license (str): default license(class", "* PROPRIETARY: Proprietary License Args: license (str): license name Returns: bool: True if", "a license in setup.cfg-- it creates the default license, which is the `MIT", "licenses Returns: None \"\"\" print('Supported licenses are as follows:') indent = \" \"", "v2.0\"], \"NEW-BSD\": [\"license_new_bsd\", \"New BSD(Berkeley Software Distribution) License\"], \"SIMPLE-BSD\": [\"license_simplified_bsd\", \"Simplified BSD(Berkeley Software", "License for public domain\"], \"GPL2\": [\"license_gpl_2.0\", \"GNU General Public License v2.0\"], \"GPL3\": [\"license_gpl_3.0\",", "creates the default license, which is the `MIT license <https://opensource.org/licenses/MIT>`_. Args: projectDir (str):", "} class LicenseMaker(BaseMaker): \"\"\"*Maker* class to create ``LICENSE`` file in the project directory", "General Public License v2.1 * LGPL3: GNU Lesser General Public License v3.0 *", "settings.update(info) @staticmethod def is_supported_license(license): \"\"\"check to see if the license given is supported", "Returns: bool: True if the license given is supported, False otherwise \"\"\" return", "self.logger.info( \"* You can change the license with 'license' sub-command.\\n\" \"For help, see", "licenses are as follows:') indent = \" \" * 4 for k, v", "* 4 for k, v in _LICENSES.items(): print('{0}{1}: {2}'.format(indent, k, v[1])) def generate(self):", "basically choose the license specified in setup.cfg file. But if it can not", "* MOZILLA: Mozilla Public License v2.0 * NEW-BSD: New BSD(Berkeley Software Distribution) License", "otherwise \"\"\" licFile = os.path.join(self.projectDir, 'LICENSE') ret = self.write_file(_LICENSES[self.license][0], licFile) if not ret:", "Currently supported licenses are:: * APACHE: Apace License * CC0: Creative Commons License", "license <https://opensource.org/licenses/MIT>`_. 
Args: projectDir (str): absolute path of project directory to create force", "is_supported_license(license): \"\"\"check to see if the license given is supported by *skelpy* or", "\"GNU General Public License v3.0\"], \"LGPL2\": [\"license_lgpl_2.1\", \"GNU Lesser General Public License v2.1\"],", ":attr:`maker.settings` dictionary\"\"\" info = { 'today': datetime.date.today().isoformat(), 'year': str(datetime.date.today().year), } settings.update(info) @staticmethod def", "\"New BSD(Berkeley Software Distribution) License\"], \"SIMPLE-BSD\": [\"license_simplified_bsd\", \"Simplified BSD(Berkeley Software Distribution) License\"], \"PROPRIETARY\":", "for public domain * GPL2: GNU General Public License v2.0 * GPL3: GNU", "Mozilla Public License v2.0 * NEW-BSD: New BSD(Berkeley Software Distribution) License * SIMPLE-BSD:", "Lesser General Public License v3.0\"], \"MIT\": [\"license_mit\", \"MIT License, Default\"], \"MOZILLA\": [\"license_mozilla\", \"Mozilla", "file in the project directory ``LicenseMaker`` basically choose the license specified in setup.cfg", "the license specified in setup.cfg file. But if it can not retrieve a", "NEW-BSD: New BSD(Berkeley Software Distribution) License * SIMPLE-BSD: Simplified BSD License * PROPRIETARY:", "licFile = os.path.join(self.projectDir, 'LICENSE') ret = self.write_file(_LICENSES[self.license][0], licFile) if not ret: self.logger.info( \"*", "generate(self): \"\"\"Worker method of :class:`LicenseMaker` Returns: bool: True if successful, False otherwise \"\"\"", "import BaseMaker #: Supported licenses, corresponding template file names, and descriptions _LICENSES =", "import os import datetime from . import settings from .base import BaseMaker #:", "exists. license (str): license to create. Attributes: default_license (str): default license(class variable) \"\"\"", ". 
import settings from .base import BaseMaker #: Supported licenses, corresponding template file", "self._update_settings() def _update_settings(self): \"\"\"update :attr:`maker.settings` dictionary\"\"\" info = { 'today': datetime.date.today().isoformat(), 'year': str(datetime.date.today().year),", "(str): absolute path of project directory to create force (bool): option for overwriting", "License v2.0\"], \"NEW-BSD\": [\"license_new_bsd\", \"New BSD(Berkeley Software Distribution) License\"], \"SIMPLE-BSD\": [\"license_simplified_bsd\", \"Simplified BSD(Berkeley", "License * CC0: Creative Commons License for public domain * GPL2: GNU General", "Args: license (str): license name Returns: bool: True if the license given is", "projectDir (str): absolute path of project directory to create force (bool): option for", "BSD(Berkeley Software Distribution) License * SIMPLE-BSD: Simplified BSD License * PROPRIETARY: Proprietary License", "License v2.0 * GPL3: GNU General Public License v3.0 * LGPL: GNU Lesser", "license(class variable) \"\"\" default_license = 'MIT' def __init__(self, projectDir, force, license, **kwargs): self.projectDir", "in setup.cfg-- it creates the default license, which is the `MIT license <https://opensource.org/licenses/MIT>`_.", "License Args: license (str): license name Returns: bool: True if the license given", "Supported licenses, corresponding template file names, and descriptions _LICENSES = { \"APACHE\": [\"license_apache\",", "to create ``LICENSE`` file in the project directory ``LicenseMaker`` basically choose the license", "from the file--for example, when the user did not specify a license in", "LGPL: GNU Lesser General Public License v2.1 * LGPL3: GNU Lesser General Public", "[\"license_mit\", \"MIT License, Default\"], \"MOZILLA\": [\"license_mozilla\", \"Mozilla Public License v2.0\"], \"NEW-BSD\": [\"license_new_bsd\", \"New", "Public License v2.1\"], \"LGPL3\": [\"license_lgpl_3.0\", \"GNU Lesser General Public License v3.0\"], 
\"MIT\": [\"license_mit\",", "[\"license_simplified_bsd\", \"Simplified BSD(Berkeley Software Distribution) License\"], \"PROPRIETARY\": [\"license_proprietary\", \"Proprietary License\"], } class LicenseMaker(BaseMaker):", "\"GNU General Public License v2.0\"], \"GPL3\": [\"license_gpl_3.0\", \"GNU General Public License v3.0\"], \"LGPL2\":", "is supported by *skelpy* or not license name is case-insensitive. .. Note:: Currently", "\"LGPL2\": [\"license_lgpl_2.1\", \"GNU Lesser General Public License v2.1\"], \"LGPL3\": [\"license_lgpl_3.0\", \"GNU Lesser General", "Commons License for public domain * GPL2: GNU General Public License v2.0 *", "by *skelpy* or not license name is case-insensitive. .. Note:: Currently supported licenses", "General Public License v2.1\"], \"LGPL3\": [\"license_lgpl_3.0\", \"GNU Lesser General Public License v3.0\"], \"MIT\":", "the project directory ``LicenseMaker`` basically choose the license specified in setup.cfg file. But", "os import datetime from . import settings from .base import BaseMaker #: Supported", "module defines :class:`LicenseMaker` class.\"\"\" from __future__ import absolute_import, print_function import os import datetime", "GNU General Public License v2.0 * GPL3: GNU General Public License v3.0 *", "the license with 'license' sub-command.\\n\" \"For help, see 'skelpy license -h or --help'.\")", "License * SIMPLE-BSD: Simplified BSD License * PROPRIETARY: Proprietary License Args: license (str):", "Simplified BSD License * PROPRIETARY: Proprietary License Args: license (str): license name Returns:", "Attributes: default_license (str): default license(class variable) \"\"\" default_license = 'MIT' def __init__(self, projectDir,", "# -*- coding: utf-8 -*- \"\"\"This module defines :class:`LicenseMaker` class.\"\"\" from __future__ import", "to see if the license given is supported by *skelpy* or not license", "when the user did not specify a license in setup.cfg-- it creates the", "license to create. 
Attributes: default_license (str): default license(class variable) \"\"\" default_license = 'MIT'", "successful, False otherwise \"\"\" licFile = os.path.join(self.projectDir, 'LICENSE') ret = self.write_file(_LICENSES[self.license][0], licFile) if", "import settings from .base import BaseMaker #: Supported licenses, corresponding template file names,", "v3.0 * MIT: MIT License, **Default** * MOZILLA: Mozilla Public License v2.0 *", "Commons License for public domain\"], \"GPL2\": [\"license_gpl_2.0\", \"GNU General Public License v2.0\"], \"GPL3\":", "v[1])) def generate(self): \"\"\"Worker method of :class:`LicenseMaker` Returns: bool: True if successful, False", "License v2.1 * LGPL3: GNU Lesser General Public License v3.0 * MIT: MIT", "BSD License * PROPRIETARY: Proprietary License Args: license (str): license name Returns: bool:", "method of :class:`LicenseMaker` Returns: bool: True if successful, False otherwise \"\"\" licFile =", "is case-insensitive. .. Note:: Currently supported licenses are:: * APACHE: Apace License *", "license (str): license to create. Attributes: default_license (str): default license(class variable) \"\"\" default_license", "Public License v2.0\"], \"NEW-BSD\": [\"license_new_bsd\", \"New BSD(Berkeley Software Distribution) License\"], \"SIMPLE-BSD\": [\"license_simplified_bsd\", \"Simplified", "specified in setup.cfg file. 
But if it can not retrieve a license from", "\"Creative Commons License for public domain\"], \"GPL2\": [\"license_gpl_2.0\", \"GNU General Public License v2.0\"],", "retrieve a license from the file--for example, when the user did not specify", "self.projectDir = projectDir self.force = force self.license = license self._update_settings() def _update_settings(self): \"\"\"update", "class LicenseMaker(BaseMaker): \"\"\"*Maker* class to create ``LICENSE`` file in the project directory ``LicenseMaker``", "\"GNU Lesser General Public License v3.0\"], \"MIT\": [\"license_mit\", \"MIT License, Default\"], \"MOZILLA\": [\"license_mozilla\",", "public domain * GPL2: GNU General Public License v2.0 * GPL3: GNU General", "You can change the license with 'license' sub-command.\\n\" \"For help, see 'skelpy license", "the file--for example, when the user did not specify a license in setup.cfg--", "New BSD(Berkeley Software Distribution) License * SIMPLE-BSD: Simplified BSD License * PROPRIETARY: Proprietary", "* GPL3: GNU General Public License v3.0 * LGPL: GNU Lesser General Public", "@staticmethod def is_supported_license(license): \"\"\"check to see if the license given is supported by", "[\"license_lgpl_3.0\", \"GNU Lesser General Public License v3.0\"], \"MIT\": [\"license_mit\", \"MIT License, Default\"], \"MOZILLA\":", "* LGPL3: GNU Lesser General Public License v3.0 * MIT: MIT License, **Default**", "= self.write_file(_LICENSES[self.license][0], licFile) if not ret: self.logger.info( \"* You can change the license", "\"GPL3\": [\"license_gpl_3.0\", \"GNU General Public License v3.0\"], \"LGPL2\": [\"license_lgpl_2.1\", \"GNU Lesser General Public", "the default license, which is the `MIT license <https://opensource.org/licenses/MIT>`_. 
Args: projectDir (str): absolute", "in the project directory ``LicenseMaker`` basically choose the license specified in setup.cfg file.", "class to create ``LICENSE`` file in the project directory ``LicenseMaker`` basically choose the", "are as follows:') indent = \" \" * 4 for k, v in", "change the license with 'license' sub-command.\\n\" \"For help, see 'skelpy license -h or", "from . import settings from .base import BaseMaker #: Supported licenses, corresponding template", "default license, which is the `MIT license <https://opensource.org/licenses/MIT>`_. Args: projectDir (str): absolute path", "settings from .base import BaseMaker #: Supported licenses, corresponding template file names, and", "return bool(_LICENSES.get(license.upper())) @staticmethod def print_licenses(): \"\"\"print supported licenses Returns: None \"\"\" print('Supported licenses", "But if it can not retrieve a license from the file--for example, when", "and descriptions _LICENSES = { \"APACHE\": [\"license_apache\", \"Apache License\"], \"CC0\": [\"license_cc0_1.0\", \"Creative Commons", "did not specify a license in setup.cfg-- it creates the default license, which", "None \"\"\" print('Supported licenses are as follows:') indent = \" \" * 4", "\"\"\" licFile = os.path.join(self.projectDir, 'LICENSE') ret = self.write_file(_LICENSES[self.license][0], licFile) if not ret: self.logger.info(", "\"\"\"*Maker* class to create ``LICENSE`` file in the project directory ``LicenseMaker`` basically choose", "create. 
Attributes: default_license (str): default license(class variable) \"\"\" default_license = 'MIT' def __init__(self,", "\"MIT\": [\"license_mit\", \"MIT License, Default\"], \"MOZILLA\": [\"license_mozilla\", \"Mozilla Public License v2.0\"], \"NEW-BSD\": [\"license_new_bsd\",", "if it can not retrieve a license from the file--for example, when the", "MIT: MIT License, **Default** * MOZILLA: Mozilla Public License v2.0 * NEW-BSD: New", "BSD(Berkeley Software Distribution) License\"], \"SIMPLE-BSD\": [\"license_simplified_bsd\", \"Simplified BSD(Berkeley Software Distribution) License\"], \"PROPRIETARY\": [\"license_proprietary\",", "General Public License v2.0 * GPL3: GNU General Public License v3.0 * LGPL:", "[\"license_cc0_1.0\", \"Creative Commons License for public domain\"], \"GPL2\": [\"license_gpl_2.0\", \"GNU General Public License", "force, license, **kwargs): self.projectDir = projectDir self.force = force self.license = license self._update_settings()", "License, Default\"], \"MOZILLA\": [\"license_mozilla\", \"Mozilla Public License v2.0\"], \"NEW-BSD\": [\"license_new_bsd\", \"New BSD(Berkeley Software", "#!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\"This module defines :class:`LicenseMaker` class.\"\"\" from", "def generate(self): \"\"\"Worker method of :class:`LicenseMaker` Returns: bool: True if successful, False otherwise", "bool: True if successful, False otherwise \"\"\" licFile = os.path.join(self.projectDir, 'LICENSE') ret =", "Public License v3.0 * MIT: MIT License, **Default** * MOZILLA: Mozilla Public License", "**kwargs): self.projectDir = projectDir self.force = force self.license = license self._update_settings() def _update_settings(self):", "* GPL2: GNU General Public License v2.0 * GPL3: GNU General Public License", "(str): license name Returns: bool: True if the license given is supported, False", "utf-8 -*- \"\"\"This module defines :class:`LicenseMaker` class.\"\"\" from __future__ import absolute_import, print_function 
import", "projectDir, force, license, **kwargs): self.projectDir = projectDir self.force = force self.license = license", "create ``LICENSE`` file in the project directory ``LicenseMaker`` basically choose the license specified", "Public License v2.0 * NEW-BSD: New BSD(Berkeley Software Distribution) License * SIMPLE-BSD: Simplified", "str(datetime.date.today().year), } settings.update(info) @staticmethod def is_supported_license(license): \"\"\"check to see if the license given", "= force self.license = license self._update_settings() def _update_settings(self): \"\"\"update :attr:`maker.settings` dictionary\"\"\" info =", "if not ret: self.logger.info( \"* You can change the license with 'license' sub-command.\\n\"", "*skelpy* or not license name is case-insensitive. .. Note:: Currently supported licenses are::", "choose the license specified in setup.cfg file. But if it can not retrieve", "\"* You can change the license with 'license' sub-command.\\n\" \"For help, see 'skelpy", "if the license given is supported by *skelpy* or not license name is", "otherwise \"\"\" return bool(_LICENSES.get(license.upper())) @staticmethod def print_licenses(): \"\"\"print supported licenses Returns: None \"\"\"", "\"CC0\": [\"license_cc0_1.0\", \"Creative Commons License for public domain\"], \"GPL2\": [\"license_gpl_2.0\", \"GNU General Public", "the user did not specify a license in setup.cfg-- it creates the default", "supported licenses are:: * APACHE: Apace License * CC0: Creative Commons License for", "the `MIT license <https://opensource.org/licenses/MIT>`_. 
Args: projectDir (str): absolute path of project directory to", "\"LGPL3\": [\"license_lgpl_3.0\", \"GNU Lesser General Public License v3.0\"], \"MIT\": [\"license_mit\", \"MIT License, Default\"],", "Public License v2.0 * GPL3: GNU General Public License v3.0 * LGPL: GNU", "v2.1 * LGPL3: GNU Lesser General Public License v3.0 * MIT: MIT License,", "{2}'.format(indent, k, v[1])) def generate(self): \"\"\"Worker method of :class:`LicenseMaker` Returns: bool: True if", "as follows:') indent = \" \" * 4 for k, v in _LICENSES.items():", "a license from the file--for example, when the user did not specify a", ":class:`LicenseMaker` class.\"\"\" from __future__ import absolute_import, print_function import os import datetime from .", "'year': str(datetime.date.today().year), } settings.update(info) @staticmethod def is_supported_license(license): \"\"\"check to see if the license", "{ \"APACHE\": [\"license_apache\", \"Apache License\"], \"CC0\": [\"license_cc0_1.0\", \"Creative Commons License for public domain\"],", "v3.0\"], \"LGPL2\": [\"license_lgpl_2.1\", \"GNU Lesser General Public License v2.1\"], \"LGPL3\": [\"license_lgpl_3.0\", \"GNU Lesser", "= { 'today': datetime.date.today().isoformat(), 'year': str(datetime.date.today().year), } settings.update(info) @staticmethod def is_supported_license(license): \"\"\"check to", "def print_licenses(): \"\"\"print supported licenses Returns: None \"\"\" print('Supported licenses are as follows:')", "print_licenses(): \"\"\"print supported licenses Returns: None \"\"\" print('Supported licenses are as follows:') indent", "* NEW-BSD: New BSD(Berkeley Software Distribution) License * SIMPLE-BSD: Simplified BSD License *", "License v2.0\"], \"GPL3\": [\"license_gpl_3.0\", \"GNU General Public License v3.0\"], \"LGPL2\": [\"license_lgpl_2.1\", \"GNU Lesser", "option for overwriting if the file exists. license (str): license to create. 
Attributes:", "= projectDir self.force = force self.license = license self._update_settings() def _update_settings(self): \"\"\"update :attr:`maker.settings`", "'MIT' def __init__(self, projectDir, force, license, **kwargs): self.projectDir = projectDir self.force = force", "dictionary\"\"\" info = { 'today': datetime.date.today().isoformat(), 'year': str(datetime.date.today().year), } settings.update(info) @staticmethod def is_supported_license(license):", "license self._update_settings() def _update_settings(self): \"\"\"update :attr:`maker.settings` dictionary\"\"\" info = { 'today': datetime.date.today().isoformat(), 'year':", "} settings.update(info) @staticmethod def is_supported_license(license): \"\"\"check to see if the license given is", "ret: self.logger.info( \"* You can change the license with 'license' sub-command.\\n\" \"For help,", "v in _LICENSES.items(): print('{0}{1}: {2}'.format(indent, k, v[1])) def generate(self): \"\"\"Worker method of :class:`LicenseMaker`", "\"MIT License, Default\"], \"MOZILLA\": [\"license_mozilla\", \"Mozilla Public License v2.0\"], \"NEW-BSD\": [\"license_new_bsd\", \"New BSD(Berkeley", "default license(class variable) \"\"\" default_license = 'MIT' def __init__(self, projectDir, force, license, **kwargs):", "if the file exists. license (str): license to create. Attributes: default_license (str): default", "GPL3: GNU General Public License v3.0 * LGPL: GNU Lesser General Public License", "\"Mozilla Public License v2.0\"], \"NEW-BSD\": [\"license_new_bsd\", \"New BSD(Berkeley Software Distribution) License\"], \"SIMPLE-BSD\": [\"license_simplified_bsd\",", "indent = \" \" * 4 for k, v in _LICENSES.items(): print('{0}{1}: {2}'.format(indent,", "def is_supported_license(license): \"\"\"check to see if the license given is supported by *skelpy*", "class.\"\"\" from __future__ import absolute_import, print_function import os import datetime from . 
import", "Public License v2.1 * LGPL3: GNU Lesser General Public License v3.0 * MIT:", "license with 'license' sub-command.\\n\" \"For help, see 'skelpy license -h or --help'.\") return", "license specified in setup.cfg file. But if it can not retrieve a license", "\"\"\" default_license = 'MIT' def __init__(self, projectDir, force, license, **kwargs): self.projectDir = projectDir", "v2.1\"], \"LGPL3\": [\"license_lgpl_3.0\", \"GNU Lesser General Public License v3.0\"], \"MIT\": [\"license_mit\", \"MIT License,", "Returns: None \"\"\" print('Supported licenses are as follows:') indent = \" \" *", "Public License v3.0\"], \"LGPL2\": [\"license_lgpl_2.1\", \"GNU Lesser General Public License v2.1\"], \"LGPL3\": [\"license_lgpl_3.0\",", "\"\"\"Worker method of :class:`LicenseMaker` Returns: bool: True if successful, False otherwise \"\"\" licFile", "MIT License, **Default** * MOZILLA: Mozilla Public License v2.0 * NEW-BSD: New BSD(Berkeley", "public domain\"], \"GPL2\": [\"license_gpl_2.0\", \"GNU General Public License v2.0\"], \"GPL3\": [\"license_gpl_3.0\", \"GNU General", "v2.0 * GPL3: GNU General Public License v3.0 * LGPL: GNU Lesser General", "is the `MIT license <https://opensource.org/licenses/MIT>`_. Args: projectDir (str): absolute path of project directory", "{ 'today': datetime.date.today().isoformat(), 'year': str(datetime.date.today().year), } settings.update(info) @staticmethod def is_supported_license(license): \"\"\"check to see", "license name Returns: bool: True if the license given is supported, False otherwise", "_LICENSES.items(): print('{0}{1}: {2}'.format(indent, k, v[1])) def generate(self): \"\"\"Worker method of :class:`LicenseMaker` Returns: bool:", "can not retrieve a license from the file--for example, when the user did", "directory ``LicenseMaker`` basically choose the license specified in setup.cfg file. 
But if it", "supported, False otherwise \"\"\" return bool(_LICENSES.get(license.upper())) @staticmethod def print_licenses(): \"\"\"print supported licenses Returns:", "name Returns: bool: True if the license given is supported, False otherwise \"\"\"", "is supported, False otherwise \"\"\" return bool(_LICENSES.get(license.upper())) @staticmethod def print_licenses(): \"\"\"print supported licenses", "Public License v2.0\"], \"GPL3\": [\"license_gpl_3.0\", \"GNU General Public License v3.0\"], \"LGPL2\": [\"license_lgpl_2.1\", \"GNU", "License v3.0\"], \"MIT\": [\"license_mit\", \"MIT License, Default\"], \"MOZILLA\": [\"license_mozilla\", \"Mozilla Public License v2.0\"],", "python # -*- coding: utf-8 -*- \"\"\"This module defines :class:`LicenseMaker` class.\"\"\" from __future__", "``LicenseMaker`` basically choose the license specified in setup.cfg file. But if it can", "[\"license_proprietary\", \"Proprietary License\"], } class LicenseMaker(BaseMaker): \"\"\"*Maker* class to create ``LICENSE`` file in", "default_license = 'MIT' def __init__(self, projectDir, force, license, **kwargs): self.projectDir = projectDir self.force", "license from the file--for example, when the user did not specify a license", "_update_settings(self): \"\"\"update :attr:`maker.settings` dictionary\"\"\" info = { 'today': datetime.date.today().isoformat(), 'year': str(datetime.date.today().year), } settings.update(info)", "of project directory to create force (bool): option for overwriting if the file", "\"\"\" print('Supported licenses are as follows:') indent = \" \" * 4 for", "Distribution) License * SIMPLE-BSD: Simplified BSD License * PROPRIETARY: Proprietary License Args: license", "PROPRIETARY: Proprietary License Args: license (str): license name Returns: bool: True if the", "[\"license_new_bsd\", \"New BSD(Berkeley Software Distribution) License\"], \"SIMPLE-BSD\": [\"license_simplified_bsd\", \"Simplified BSD(Berkeley Software Distribution) License\"],", "specify a 
license in setup.cfg-- it creates the default license, which is the", "from __future__ import absolute_import, print_function import os import datetime from . import settings", "-*- \"\"\"This module defines :class:`LicenseMaker` class.\"\"\" from __future__ import absolute_import, print_function import os", "\"\"\"check to see if the license given is supported by *skelpy* or not", "license (str): license name Returns: bool: True if the license given is supported,", "Apace License * CC0: Creative Commons License for public domain * GPL2: GNU", "are:: * APACHE: Apace License * CC0: Creative Commons License for public domain", "Args: projectDir (str): absolute path of project directory to create force (bool): option", "supported by *skelpy* or not license name is case-insensitive. .. Note:: Currently supported", "self.force = force self.license = license self._update_settings() def _update_settings(self): \"\"\"update :attr:`maker.settings` dictionary\"\"\" info", "force (bool): option for overwriting if the file exists. 
license (str): license to", "datetime.date.today().isoformat(), 'year': str(datetime.date.today().year), } settings.update(info) @staticmethod def is_supported_license(license): \"\"\"check to see if the", "\"\"\"update :attr:`maker.settings` dictionary\"\"\" info = { 'today': datetime.date.today().isoformat(), 'year': str(datetime.date.today().year), } settings.update(info) @staticmethod", "\"NEW-BSD\": [\"license_new_bsd\", \"New BSD(Berkeley Software Distribution) License\"], \"SIMPLE-BSD\": [\"license_simplified_bsd\", \"Simplified BSD(Berkeley Software Distribution)", "self.license = license self._update_settings() def _update_settings(self): \"\"\"update :attr:`maker.settings` dictionary\"\"\" info = { 'today':", "* MIT: MIT License, **Default** * MOZILLA: Mozilla Public License v2.0 * NEW-BSD:", "Creative Commons License for public domain * GPL2: GNU General Public License v2.0", "license, **kwargs): self.projectDir = projectDir self.force = force self.license = license self._update_settings() def", "(str): license to create. Attributes: default_license (str): default license(class variable) \"\"\" default_license =", "LGPL3: GNU Lesser General Public License v3.0 * MIT: MIT License, **Default** *", "info = { 'today': datetime.date.today().isoformat(), 'year': str(datetime.date.today().year), } settings.update(info) @staticmethod def is_supported_license(license): \"\"\"check", "MOZILLA: Mozilla Public License v2.0 * NEW-BSD: New BSD(Berkeley Software Distribution) License *", "__future__ import absolute_import, print_function import os import datetime from . 
import settings from", "= { \"APACHE\": [\"license_apache\", \"Apache License\"], \"CC0\": [\"license_cc0_1.0\", \"Creative Commons License for public", "Default\"], \"MOZILLA\": [\"license_mozilla\", \"Mozilla Public License v2.0\"], \"NEW-BSD\": [\"license_new_bsd\", \"New BSD(Berkeley Software Distribution)", "License v2.0 * NEW-BSD: New BSD(Berkeley Software Distribution) License * SIMPLE-BSD: Simplified BSD", "license in setup.cfg-- it creates the default license, which is the `MIT license", "[\"license_gpl_3.0\", \"GNU General Public License v3.0\"], \"LGPL2\": [\"license_lgpl_2.1\", \"GNU Lesser General Public License", "* CC0: Creative Commons License for public domain * GPL2: GNU General Public", "-*- coding: utf-8 -*- \"\"\"This module defines :class:`LicenseMaker` class.\"\"\" from __future__ import absolute_import,", "import absolute_import, print_function import os import datetime from . import settings from .base", "4 for k, v in _LICENSES.items(): print('{0}{1}: {2}'.format(indent, k, v[1])) def generate(self): \"\"\"Worker", "not retrieve a license from the file--for example, when the user did not", "License * PROPRIETARY: Proprietary License Args: license (str): license name Returns: bool: True", "\"SIMPLE-BSD\": [\"license_simplified_bsd\", \"Simplified BSD(Berkeley Software Distribution) License\"], \"PROPRIETARY\": [\"license_proprietary\", \"Proprietary License\"], } class", "CC0: Creative Commons License for public domain * GPL2: GNU General Public License", "License, **Default** * MOZILLA: Mozilla Public License v2.0 * NEW-BSD: New BSD(Berkeley Software", "import datetime from . import settings from .base import BaseMaker #: Supported licenses,", "path of project directory to create force (bool): option for overwriting if the", "license name is case-insensitive. .. 
Note:: Currently supported licenses are:: * APACHE: Apace", "[\"license_lgpl_2.1\", \"GNU Lesser General Public License v2.1\"], \"LGPL3\": [\"license_lgpl_3.0\", \"GNU Lesser General Public", "print_function import os import datetime from . import settings from .base import BaseMaker", "self.write_file(_LICENSES[self.license][0], licFile) if not ret: self.logger.info( \"* You can change the license with", "default_license (str): default license(class variable) \"\"\" default_license = 'MIT' def __init__(self, projectDir, force,", "License\"], \"PROPRIETARY\": [\"license_proprietary\", \"Proprietary License\"], } class LicenseMaker(BaseMaker): \"\"\"*Maker* class to create ``LICENSE``", "License\"], \"SIMPLE-BSD\": [\"license_simplified_bsd\", \"Simplified BSD(Berkeley Software Distribution) License\"], \"PROPRIETARY\": [\"license_proprietary\", \"Proprietary License\"], }", "* APACHE: Apace License * CC0: Creative Commons License for public domain *", "def __init__(self, projectDir, force, license, **kwargs): self.projectDir = projectDir self.force = force self.license", "domain\"], \"GPL2\": [\"license_gpl_2.0\", \"GNU General Public License v2.0\"], \"GPL3\": [\"license_gpl_3.0\", \"GNU General Public", "@staticmethod def print_licenses(): \"\"\"print supported licenses Returns: None \"\"\" print('Supported licenses are as", "GNU Lesser General Public License v3.0 * MIT: MIT License, **Default** * MOZILLA:", "Lesser General Public License v3.0 * MIT: MIT License, **Default** * MOZILLA: Mozilla", "General Public License v3.0 * MIT: MIT License, **Default** * MOZILLA: Mozilla Public", "variable) \"\"\" default_license = 'MIT' def __init__(self, projectDir, force, license, **kwargs): self.projectDir =", "General Public License v3.0 * LGPL: GNU Lesser General Public License v2.1 *", "= os.path.join(self.projectDir, 'LICENSE') ret = self.write_file(_LICENSES[self.license][0], licFile) if not ret: self.logger.info( \"* You", "GPL2: GNU General Public License v2.0 * 
GPL3: GNU General Public License v3.0", "Public License v3.0 * LGPL: GNU Lesser General Public License v2.1 * LGPL3:", "absolute path of project directory to create force (bool): option for overwriting if" ]
[ "self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def test_inx(self): self.cpu.x.value = 0x00 instruction = self.cpu.decode(0xE8) self.cpu.execute(instruction) self.assertEqual(self.cpu.x.value, 0x01)", "unittest from nes.processors.cpu import Cpu from nes.bus import Bus from nes.bus.devices.memory import Ram", "self.cpu.decode(0xEE) self.cpu.execute(instruction) self.assertEqual(self.cpu.read(0x0000), 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def test_inx(self): self.cpu.x.value = 0x00 instruction =", "0x00 instruction = self.cpu.decode(0xC8) self.cpu.execute(instruction) self.assertEqual(self.cpu.y.value, 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) if __name__ == '__main__':", "class CpuIncrementInstructionsTestCase(unittest.TestCase): def setUp(self): bus = Bus() bus.attach_device('RAM', Ram(256), 0, 256) self.cpu =", "from nes.bus import Bus from nes.bus.devices.memory import Ram class CpuIncrementInstructionsTestCase(unittest.TestCase): def setUp(self): bus", "Bus from nes.bus.devices.memory import Ram class CpuIncrementInstructionsTestCase(unittest.TestCase): def setUp(self): bus = Bus() bus.attach_device('RAM',", "= Bus() bus.attach_device('RAM', Ram(256), 0, 256) self.cpu = Cpu(bus) def test_inc(self): self.cpu.write(0x0000, 0x00)", "Cpu(bus) def test_inc(self): self.cpu.write(0x0000, 0x00) instruction = self.cpu.decode(0xEE) self.cpu.execute(instruction) self.assertEqual(self.cpu.read(0x0000), 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n)", "= self.cpu.decode(0xE8) self.cpu.execute(instruction) self.assertEqual(self.cpu.x.value, 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def test_iny(self): self.cpu.y.value = 0x00 instruction", "instruction = self.cpu.decode(0xC8) self.cpu.execute(instruction) self.assertEqual(self.cpu.y.value, 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) if __name__ == '__main__': unittest.main()", 
"self.cpu.write(0x0000, 0x00) instruction = self.cpu.decode(0xEE) self.cpu.execute(instruction) self.assertEqual(self.cpu.read(0x0000), 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def test_inx(self): self.cpu.x.value", "self.cpu = Cpu(bus) def test_inc(self): self.cpu.write(0x0000, 0x00) instruction = self.cpu.decode(0xEE) self.cpu.execute(instruction) self.assertEqual(self.cpu.read(0x0000), 0x01)", "import Cpu from nes.bus import Bus from nes.bus.devices.memory import Ram class CpuIncrementInstructionsTestCase(unittest.TestCase): def", "import Bus from nes.bus.devices.memory import Ram class CpuIncrementInstructionsTestCase(unittest.TestCase): def setUp(self): bus = Bus()", "bus = Bus() bus.attach_device('RAM', Ram(256), 0, 256) self.cpu = Cpu(bus) def test_inc(self): self.cpu.write(0x0000,", "setUp(self): bus = Bus() bus.attach_device('RAM', Ram(256), 0, 256) self.cpu = Cpu(bus) def test_inc(self):", "def test_inc(self): self.cpu.write(0x0000, 0x00) instruction = self.cpu.decode(0xEE) self.cpu.execute(instruction) self.assertEqual(self.cpu.read(0x0000), 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def", "test_inx(self): self.cpu.x.value = 0x00 instruction = self.cpu.decode(0xE8) self.cpu.execute(instruction) self.assertEqual(self.cpu.x.value, 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def", "import unittest from nes.processors.cpu import Cpu from nes.bus import Bus from nes.bus.devices.memory import", "256) self.cpu = Cpu(bus) def test_inc(self): self.cpu.write(0x0000, 0x00) instruction = self.cpu.decode(0xEE) self.cpu.execute(instruction) self.assertEqual(self.cpu.read(0x0000),", "from nes.bus.devices.memory import Ram class CpuIncrementInstructionsTestCase(unittest.TestCase): def setUp(self): bus = Bus() bus.attach_device('RAM', Ram(256),", "bus.attach_device('RAM', Ram(256), 0, 256) self.cpu = Cpu(bus) def test_inc(self): self.cpu.write(0x0000, 0x00) instruction =", "test_iny(self): 
self.cpu.y.value = 0x00 instruction = self.cpu.decode(0xC8) self.cpu.execute(instruction) self.assertEqual(self.cpu.y.value, 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) if", "instruction = self.cpu.decode(0xEE) self.cpu.execute(instruction) self.assertEqual(self.cpu.read(0x0000), 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def test_inx(self): self.cpu.x.value = 0x00", "Bus() bus.attach_device('RAM', Ram(256), 0, 256) self.cpu = Cpu(bus) def test_inc(self): self.cpu.write(0x0000, 0x00) instruction", "0x00) instruction = self.cpu.decode(0xEE) self.cpu.execute(instruction) self.assertEqual(self.cpu.read(0x0000), 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def test_inx(self): self.cpu.x.value =", "= 0x00 instruction = self.cpu.decode(0xE8) self.cpu.execute(instruction) self.assertEqual(self.cpu.x.value, 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def test_iny(self): self.cpu.y.value", "= self.cpu.decode(0xEE) self.cpu.execute(instruction) self.assertEqual(self.cpu.read(0x0000), 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def test_inx(self): self.cpu.x.value = 0x00 instruction", "CpuIncrementInstructionsTestCase(unittest.TestCase): def setUp(self): bus = Bus() bus.attach_device('RAM', Ram(256), 0, 256) self.cpu = Cpu(bus)", "0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def test_iny(self): self.cpu.y.value = 0x00 instruction = self.cpu.decode(0xC8) self.cpu.execute(instruction) self.assertEqual(self.cpu.y.value,", "Ram class CpuIncrementInstructionsTestCase(unittest.TestCase): def setUp(self): bus = Bus() bus.attach_device('RAM', Ram(256), 0, 256) self.cpu", "= Cpu(bus) def test_inc(self): self.cpu.write(0x0000, 0x00) instruction = self.cpu.decode(0xEE) self.cpu.execute(instruction) self.assertEqual(self.cpu.read(0x0000), 0x01) self.assertFalse(self.cpu.p.z)", "self.cpu.y.value = 0x00 instruction = self.cpu.decode(0xC8) 
self.cpu.execute(instruction) self.assertEqual(self.cpu.y.value, 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) if __name__", "from nes.processors.cpu import Cpu from nes.bus import Bus from nes.bus.devices.memory import Ram class", "self.assertEqual(self.cpu.read(0x0000), 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def test_inx(self): self.cpu.x.value = 0x00 instruction = self.cpu.decode(0xE8) self.cpu.execute(instruction)", "import Ram class CpuIncrementInstructionsTestCase(unittest.TestCase): def setUp(self): bus = Bus() bus.attach_device('RAM', Ram(256), 0, 256)", "self.assertFalse(self.cpu.p.n) def test_inx(self): self.cpu.x.value = 0x00 instruction = self.cpu.decode(0xE8) self.cpu.execute(instruction) self.assertEqual(self.cpu.x.value, 0x01) self.assertFalse(self.cpu.p.z)", "0x00 instruction = self.cpu.decode(0xE8) self.cpu.execute(instruction) self.assertEqual(self.cpu.x.value, 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def test_iny(self): self.cpu.y.value =", "self.assertFalse(self.cpu.p.n) def test_iny(self): self.cpu.y.value = 0x00 instruction = self.cpu.decode(0xC8) self.cpu.execute(instruction) self.assertEqual(self.cpu.y.value, 0x01) self.assertFalse(self.cpu.p.z)", "nes.bus.devices.memory import Ram class CpuIncrementInstructionsTestCase(unittest.TestCase): def setUp(self): bus = Bus() bus.attach_device('RAM', Ram(256), 0,", "self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def test_iny(self): self.cpu.y.value = 0x00 instruction = self.cpu.decode(0xC8) self.cpu.execute(instruction) self.assertEqual(self.cpu.y.value, 0x01)", "= 0x00 instruction = self.cpu.decode(0xC8) self.cpu.execute(instruction) self.assertEqual(self.cpu.y.value, 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) if __name__ ==", "self.cpu.execute(instruction) self.assertEqual(self.cpu.x.value, 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def test_iny(self): 
self.cpu.y.value = 0x00 instruction = self.cpu.decode(0xC8)", "def setUp(self): bus = Bus() bus.attach_device('RAM', Ram(256), 0, 256) self.cpu = Cpu(bus) def", "0, 256) self.cpu = Cpu(bus) def test_inc(self): self.cpu.write(0x0000, 0x00) instruction = self.cpu.decode(0xEE) self.cpu.execute(instruction)", "def test_inx(self): self.cpu.x.value = 0x00 instruction = self.cpu.decode(0xE8) self.cpu.execute(instruction) self.assertEqual(self.cpu.x.value, 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n)", "Ram(256), 0, 256) self.cpu = Cpu(bus) def test_inc(self): self.cpu.write(0x0000, 0x00) instruction = self.cpu.decode(0xEE)", "self.cpu.execute(instruction) self.assertEqual(self.cpu.read(0x0000), 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def test_inx(self): self.cpu.x.value = 0x00 instruction = self.cpu.decode(0xE8)", "Cpu from nes.bus import Bus from nes.bus.devices.memory import Ram class CpuIncrementInstructionsTestCase(unittest.TestCase): def setUp(self):", "instruction = self.cpu.decode(0xE8) self.cpu.execute(instruction) self.assertEqual(self.cpu.x.value, 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def test_iny(self): self.cpu.y.value = 0x00", "self.cpu.x.value = 0x00 instruction = self.cpu.decode(0xE8) self.cpu.execute(instruction) self.assertEqual(self.cpu.x.value, 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def test_iny(self):", "self.cpu.decode(0xE8) self.cpu.execute(instruction) self.assertEqual(self.cpu.x.value, 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def test_iny(self): self.cpu.y.value = 0x00 instruction =", "def test_iny(self): self.cpu.y.value = 0x00 instruction = self.cpu.decode(0xC8) self.cpu.execute(instruction) self.assertEqual(self.cpu.y.value, 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n)", "nes.processors.cpu import Cpu from nes.bus import Bus from nes.bus.devices.memory import Ram class 
CpuIncrementInstructionsTestCase(unittest.TestCase):", "test_inc(self): self.cpu.write(0x0000, 0x00) instruction = self.cpu.decode(0xEE) self.cpu.execute(instruction) self.assertEqual(self.cpu.read(0x0000), 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def test_inx(self):", "nes.bus import Bus from nes.bus.devices.memory import Ram class CpuIncrementInstructionsTestCase(unittest.TestCase): def setUp(self): bus =", "self.assertEqual(self.cpu.x.value, 0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def test_iny(self): self.cpu.y.value = 0x00 instruction = self.cpu.decode(0xC8) self.cpu.execute(instruction)", "0x01) self.assertFalse(self.cpu.p.z) self.assertFalse(self.cpu.p.n) def test_inx(self): self.cpu.x.value = 0x00 instruction = self.cpu.decode(0xE8) self.cpu.execute(instruction) self.assertEqual(self.cpu.x.value," ]
[ "self.data_insert = { \"email\": \"<EMAIL>\", \"first_name\": \"Teste01\", \"last_name\": \"Last\", \"is_staff\": False, \"is_active\": True,", "{ \"email\": \"<EMAIL>\", \"first_name\": \"Teste01\", \"last_name\": \"Last\", \"is_staff\": False, \"is_active\": True, \"notes\": \"Teste", "setUp(self): self.data_insert = { \"email\": \"<EMAIL>\", \"first_name\": \"Teste01\", \"last_name\": \"Last\", \"is_staff\": False, \"is_active\":", "apps.account.models import CustomUser class TestsCustomUserModel(TestCase): def setUp(self): self.data_insert = { \"email\": \"<EMAIL>\", \"first_name\":", "\"is_staff\": False, \"is_active\": True, \"notes\": \"Teste de Notes\", } def test_insert_custom_user(self): user =", "\"Last\", \"is_staff\": False, \"is_active\": True, \"notes\": \"Teste de Notes\", } def test_insert_custom_user(self): user", "\"email\": \"<EMAIL>\", \"first_name\": \"Teste01\", \"last_name\": \"Last\", \"is_staff\": False, \"is_active\": True, \"notes\": \"Teste de", "\"Teste01\", \"last_name\": \"Last\", \"is_staff\": False, \"is_active\": True, \"notes\": \"Teste de Notes\", } def", "\"is_active\": True, \"notes\": \"Teste de Notes\", } def test_insert_custom_user(self): user = CustomUser.objects.create( email=self.data_insert.get(\"email\"),", "def test_insert_custom_user(self): user = CustomUser.objects.create( email=self.data_insert.get(\"email\"), first_name=self.data_insert.get(\"first_name\"), last_name=self.data_insert.get(\"last_name\"), is_staff=self.data_insert.get(\"is_staff\"), is_active=self.data_insert.get(\"is_active\"), notes=self.data_insert.get(\"notes\"), ) self.assertIsNotNone(user.pk)", "TestCase from apps.account.models import CustomUser class TestsCustomUserModel(TestCase): def setUp(self): self.data_insert = { \"email\":", "= { \"email\": \"<EMAIL>\", \"first_name\": \"Teste01\", \"last_name\": \"Last\", \"is_staff\": False, \"is_active\": True, \"notes\":", "True, \"notes\": \"Teste de Notes\", } def 
test_insert_custom_user(self): user = CustomUser.objects.create( email=self.data_insert.get(\"email\"), first_name=self.data_insert.get(\"first_name\"),", "\"Teste de Notes\", } def test_insert_custom_user(self): user = CustomUser.objects.create( email=self.data_insert.get(\"email\"), first_name=self.data_insert.get(\"first_name\"), last_name=self.data_insert.get(\"last_name\"), is_staff=self.data_insert.get(\"is_staff\"),", "from django.test import TestCase from apps.account.models import CustomUser class TestsCustomUserModel(TestCase): def setUp(self): self.data_insert", "TestsCustomUserModel(TestCase): def setUp(self): self.data_insert = { \"email\": \"<EMAIL>\", \"first_name\": \"Teste01\", \"last_name\": \"Last\", \"is_staff\":", "} def test_insert_custom_user(self): user = CustomUser.objects.create( email=self.data_insert.get(\"email\"), first_name=self.data_insert.get(\"first_name\"), last_name=self.data_insert.get(\"last_name\"), is_staff=self.data_insert.get(\"is_staff\"), is_active=self.data_insert.get(\"is_active\"), notes=self.data_insert.get(\"notes\"), )", "\"notes\": \"Teste de Notes\", } def test_insert_custom_user(self): user = CustomUser.objects.create( email=self.data_insert.get(\"email\"), first_name=self.data_insert.get(\"first_name\"), last_name=self.data_insert.get(\"last_name\"),", "django.test import TestCase from apps.account.models import CustomUser class TestsCustomUserModel(TestCase): def setUp(self): self.data_insert =", "def setUp(self): self.data_insert = { \"email\": \"<EMAIL>\", \"first_name\": \"Teste01\", \"last_name\": \"Last\", \"is_staff\": False,", "import TestCase from apps.account.models import CustomUser class TestsCustomUserModel(TestCase): def setUp(self): self.data_insert = {", "de Notes\", } def test_insert_custom_user(self): user = CustomUser.objects.create( email=self.data_insert.get(\"email\"), first_name=self.data_insert.get(\"first_name\"), last_name=self.data_insert.get(\"last_name\"), 
is_staff=self.data_insert.get(\"is_staff\"), is_active=self.data_insert.get(\"is_active\"),", "\"<EMAIL>\", \"first_name\": \"Teste01\", \"last_name\": \"Last\", \"is_staff\": False, \"is_active\": True, \"notes\": \"Teste de Notes\",", "from apps.account.models import CustomUser class TestsCustomUserModel(TestCase): def setUp(self): self.data_insert = { \"email\": \"<EMAIL>\",", "\"last_name\": \"Last\", \"is_staff\": False, \"is_active\": True, \"notes\": \"Teste de Notes\", } def test_insert_custom_user(self):", "import CustomUser class TestsCustomUserModel(TestCase): def setUp(self): self.data_insert = { \"email\": \"<EMAIL>\", \"first_name\": \"Teste01\",", "Notes\", } def test_insert_custom_user(self): user = CustomUser.objects.create( email=self.data_insert.get(\"email\"), first_name=self.data_insert.get(\"first_name\"), last_name=self.data_insert.get(\"last_name\"), is_staff=self.data_insert.get(\"is_staff\"), is_active=self.data_insert.get(\"is_active\"), notes=self.data_insert.get(\"notes\"),", "CustomUser class TestsCustomUserModel(TestCase): def setUp(self): self.data_insert = { \"email\": \"<EMAIL>\", \"first_name\": \"Teste01\", \"last_name\":", "False, \"is_active\": True, \"notes\": \"Teste de Notes\", } def test_insert_custom_user(self): user = CustomUser.objects.create(", "\"first_name\": \"Teste01\", \"last_name\": \"Last\", \"is_staff\": False, \"is_active\": True, \"notes\": \"Teste de Notes\", }", "class TestsCustomUserModel(TestCase): def setUp(self): self.data_insert = { \"email\": \"<EMAIL>\", \"first_name\": \"Teste01\", \"last_name\": \"Last\"," ]
[ "segmentation task with safeguard against model training. .. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer`", "from thelper.train.segm import ImageSegmTrainer @thelper.concepts.classification class ImageClassifTester(ImageClassifTrainer, Tester): \"\"\"Session runner specialized for testing", "RegressionTester(RegressionTrainer, Tester): \"\"\"Session runner specialized for testing of regression task with safeguard against", ".. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.regr.RegressionTrainer` \"\"\" @thelper.concepts.segmentation class ImageSegmTester(ImageSegmTrainer, Tester):", "class ImageClassifTester(ImageClassifTrainer, Tester): \"\"\"Session runner specialized for testing of image classification task with", "@thelper.concepts.segmentation class ImageSegmTester(ImageSegmTrainer, Tester): \"\"\"Session runner specialized for testing of image segmentation task", "existing Trainers.\"\"\" import thelper.concepts from thelper.infer.base import Tester from thelper.train.classif import ImageClassifTrainer from", "ImageClassifTester(ImageClassifTrainer, Tester): \"\"\"Session runner specialized for testing of image classification task with safeguard", "class ImageSegmTester(ImageSegmTrainer, Tester): \"\"\"Session runner specialized for testing of image segmentation task with", "| :class:`thelper.train.detect.ObjDetectTrainer` \"\"\" @thelper.concepts.regression class RegressionTester(RegressionTrainer, Tester): \"\"\"Session runner specialized for testing of", ":class:`thelper.train.base.Trainer` | :class:`thelper.train.regr.RegressionTrainer` \"\"\" @thelper.concepts.segmentation class ImageSegmTester(ImageSegmTrainer, Tester): \"\"\"Session runner specialized for testing", "safeguard against model training. .. 
seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.segm.ImageSegmTrainer` \"\"\"", "thelper.train.detect import ObjDetectTrainer from thelper.train.regr import RegressionTrainer from thelper.train.segm import ImageSegmTrainer @thelper.concepts.classification class", "| :class:`thelper.train.base.Trainer` | :class:`thelper.train.detect.ObjDetectTrainer` \"\"\" @thelper.concepts.regression class RegressionTester(RegressionTrainer, Tester): \"\"\"Session runner specialized for", "object detection task with safeguard against model training. .. seealso:: | :class:`thelper.train.base.Tester` |", "class RegressionTester(RegressionTrainer, Tester): \"\"\"Session runner specialized for testing of regression task with safeguard", "class ObjDetectTester(ObjDetectTrainer, Tester): \"\"\"Session runner specialized for testing of object detection task with", "of image classification task with safeguard against model training. .. seealso:: | :class:`thelper.train.base.Tester`", "ImageSegmTester(ImageSegmTrainer, Tester): \"\"\"Session runner specialized for testing of image segmentation task with safeguard", "from thelper.infer.base import Tester from thelper.train.classif import ImageClassifTrainer from thelper.train.detect import ObjDetectTrainer from", "for testing of image classification task with safeguard against model training. .. seealso::", "of regression task with safeguard against model training. .. seealso:: | :class:`thelper.train.base.Tester` |", "Tester from thelper.train.classif import ImageClassifTrainer from thelper.train.detect import ObjDetectTrainer from thelper.train.regr import RegressionTrainer", "model training. .. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.classif.ImageClassifTrainer` \"\"\" @thelper.concepts.detection class", "of image segmentation task with safeguard against model training. .. 
seealso:: | :class:`thelper.train.base.Tester`", ":class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.detect.ObjDetectTrainer` \"\"\" @thelper.concepts.regression class RegressionTester(RegressionTrainer, Tester): \"\"\"Session runner specialized", "with safeguard against model training. .. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.segm.ImageSegmTrainer`", "training. .. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.classif.ImageClassifTrainer` \"\"\" @thelper.concepts.detection class ObjDetectTester(ObjDetectTrainer,", "against model training. .. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.regr.RegressionTrainer` \"\"\" @thelper.concepts.segmentation", "\"\"\"Session runner specialized for testing of image classification task with safeguard against model", "training. .. 
seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.detect.ObjDetectTrainer` \"\"\" @thelper.concepts.regression class RegressionTester(RegressionTrainer,", "Tester definitions from existing Trainers.\"\"\" import thelper.concepts from thelper.infer.base import Tester from thelper.train.classif", "thelper.infer.base import Tester from thelper.train.classif import ImageClassifTrainer from thelper.train.detect import ObjDetectTrainer from thelper.train.regr", "runner specialized for testing of object detection task with safeguard against model training.", ":class:`thelper.train.detect.ObjDetectTrainer` \"\"\" @thelper.concepts.regression class RegressionTester(RegressionTrainer, Tester): \"\"\"Session runner specialized for testing of regression", "@thelper.concepts.regression class RegressionTester(RegressionTrainer, Tester): \"\"\"Session runner specialized for testing of regression task with", "Tester): \"\"\"Session runner specialized for testing of regression task with safeguard against model", "thelper.train.segm import ImageSegmTrainer @thelper.concepts.classification class ImageClassifTester(ImageClassifTrainer, Tester): \"\"\"Session runner specialized for testing of", "Tester): \"\"\"Session runner specialized for testing of image segmentation task with safeguard against", "against model training. .. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.detect.ObjDetectTrainer` \"\"\" @thelper.concepts.regression", "\"\"\" @thelper.concepts.segmentation class ImageSegmTester(ImageSegmTrainer, Tester): \"\"\"Session runner specialized for testing of image segmentation", "model training. .. 
seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.regr.RegressionTrainer` \"\"\" @thelper.concepts.segmentation class", "seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.classif.ImageClassifTrainer` \"\"\" @thelper.concepts.detection class ObjDetectTester(ObjDetectTrainer, Tester): \"\"\"Session", "thelper.train.regr import RegressionTrainer from thelper.train.segm import ImageSegmTrainer @thelper.concepts.classification class ImageClassifTester(ImageClassifTrainer, Tester): \"\"\"Session runner", "@thelper.concepts.detection class ObjDetectTester(ObjDetectTrainer, Tester): \"\"\"Session runner specialized for testing of object detection task", ".. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.classif.ImageClassifTrainer` \"\"\" @thelper.concepts.detection class ObjDetectTester(ObjDetectTrainer, Tester):", "training. .. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.regr.RegressionTrainer` \"\"\" @thelper.concepts.segmentation class ImageSegmTester(ImageSegmTrainer,", "\"\"\" @thelper.concepts.regression class RegressionTester(RegressionTrainer, Tester): \"\"\"Session runner specialized for testing of regression task", "regression task with safeguard against model training. .. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer`", "safeguard against model training. .. 
seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.classif.ImageClassifTrainer` \"\"\"", ":class:`thelper.train.classif.ImageClassifTrainer` \"\"\" @thelper.concepts.detection class ObjDetectTester(ObjDetectTrainer, Tester): \"\"\"Session runner specialized for testing of object", ":class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.classif.ImageClassifTrainer` \"\"\" @thelper.concepts.detection class ObjDetectTester(ObjDetectTrainer, Tester): \"\"\"Session runner specialized", "\"\"\"Session runner specialized for testing of regression task with safeguard against model training.", "against model training. .. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.classif.ImageClassifTrainer` \"\"\" @thelper.concepts.detection", "with safeguard against model training. .. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.regr.RegressionTrainer`", "| :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.detect.ObjDetectTrainer` \"\"\" @thelper.concepts.regression class RegressionTester(RegressionTrainer, Tester): \"\"\"Session runner", "import ImageSegmTrainer @thelper.concepts.classification class ImageClassifTester(ImageClassifTrainer, Tester): \"\"\"Session runner specialized for testing of image", "Trainers.\"\"\" import thelper.concepts from thelper.infer.base import Tester from thelper.train.classif import ImageClassifTrainer from thelper.train.detect", "thelper.train.classif import ImageClassifTrainer from thelper.train.detect import ObjDetectTrainer from thelper.train.regr import RegressionTrainer from thelper.train.segm", "runner specialized for testing of regression task with safeguard against model training. 
..", "import Tester from thelper.train.classif import ImageClassifTrainer from thelper.train.detect import ObjDetectTrainer from thelper.train.regr import", "from thelper.train.classif import ImageClassifTrainer from thelper.train.detect import ObjDetectTrainer from thelper.train.regr import RegressionTrainer from", "from existing Trainers.\"\"\" import thelper.concepts from thelper.infer.base import Tester from thelper.train.classif import ImageClassifTrainer", ":class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.regr.RegressionTrainer` \"\"\" @thelper.concepts.segmentation class ImageSegmTester(ImageSegmTrainer, Tester): \"\"\"Session runner specialized", "| :class:`thelper.train.base.Trainer` | :class:`thelper.train.regr.RegressionTrainer` \"\"\" @thelper.concepts.segmentation class ImageSegmTester(ImageSegmTrainer, Tester): \"\"\"Session runner specialized for", "import ObjDetectTrainer from thelper.train.regr import RegressionTrainer from thelper.train.segm import ImageSegmTrainer @thelper.concepts.classification class ImageClassifTester(ImageClassifTrainer,", "specialized for testing of regression task with safeguard against model training. .. seealso::", "safeguard against model training. .. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.detect.ObjDetectTrainer` \"\"\"", "seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.detect.ObjDetectTrainer` \"\"\" @thelper.concepts.regression class RegressionTester(RegressionTrainer, Tester): \"\"\"Session", "import ImageClassifTrainer from thelper.train.detect import ObjDetectTrainer from thelper.train.regr import RegressionTrainer from thelper.train.segm import", "specialized for testing of image classification task with safeguard against model training. 
..", ":class:`thelper.train.base.Trainer` | :class:`thelper.train.detect.ObjDetectTrainer` \"\"\" @thelper.concepts.regression class RegressionTester(RegressionTrainer, Tester): \"\"\"Session runner specialized for testing", "@thelper.concepts.classification class ImageClassifTester(ImageClassifTrainer, Tester): \"\"\"Session runner specialized for testing of image classification task", ":class:`thelper.train.regr.RegressionTrainer` \"\"\" @thelper.concepts.segmentation class ImageSegmTester(ImageSegmTrainer, Tester): \"\"\"Session runner specialized for testing of image", "for testing of object detection task with safeguard against model training. .. seealso::", "of object detection task with safeguard against model training. .. seealso:: | :class:`thelper.train.base.Tester`", "definitions from existing Trainers.\"\"\" import thelper.concepts from thelper.infer.base import Tester from thelper.train.classif import", "model training. .. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.detect.ObjDetectTrainer` \"\"\" @thelper.concepts.regression class", "| :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.classif.ImageClassifTrainer` \"\"\" @thelper.concepts.detection class ObjDetectTester(ObjDetectTrainer, Tester): \"\"\"Session runner", "specialized for testing of image segmentation task with safeguard against model training. ..", "RegressionTrainer from thelper.train.segm import ImageSegmTrainer @thelper.concepts.classification class ImageClassifTester(ImageClassifTrainer, Tester): \"\"\"Session runner specialized for", ":class:`thelper.train.base.Trainer` | :class:`thelper.train.classif.ImageClassifTrainer` \"\"\" @thelper.concepts.detection class ObjDetectTester(ObjDetectTrainer, Tester): \"\"\"Session runner specialized for testing", "image segmentation task with safeguard against model training. .. 
seealso:: | :class:`thelper.train.base.Tester` |", "| :class:`thelper.train.regr.RegressionTrainer` \"\"\" @thelper.concepts.segmentation class ImageSegmTester(ImageSegmTrainer, Tester): \"\"\"Session runner specialized for testing of", "classification task with safeguard against model training. .. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer`", "| :class:`thelper.train.base.Trainer` | :class:`thelper.train.classif.ImageClassifTrainer` \"\"\" @thelper.concepts.detection class ObjDetectTester(ObjDetectTrainer, Tester): \"\"\"Session runner specialized for", "ObjDetectTester(ObjDetectTrainer, Tester): \"\"\"Session runner specialized for testing of object detection task with safeguard", "Tester): \"\"\"Session runner specialized for testing of image classification task with safeguard against", "Tester): \"\"\"Session runner specialized for testing of object detection task with safeguard against", "import thelper.concepts from thelper.infer.base import Tester from thelper.train.classif import ImageClassifTrainer from thelper.train.detect import", "with safeguard against model training. .. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.classif.ImageClassifTrainer`", "seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.regr.RegressionTrainer` \"\"\" @thelper.concepts.segmentation class ImageSegmTester(ImageSegmTrainer, Tester): \"\"\"Session", "specialized for testing of object detection task with safeguard against model training. 
..", "runner specialized for testing of image classification task with safeguard against model training.", "| :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.regr.RegressionTrainer` \"\"\" @thelper.concepts.segmentation class ImageSegmTester(ImageSegmTrainer, Tester): \"\"\"Session runner", "import RegressionTrainer from thelper.train.segm import ImageSegmTrainer @thelper.concepts.classification class ImageClassifTester(ImageClassifTrainer, Tester): \"\"\"Session runner specialized", "\"\"\"Explicit Tester definitions from existing Trainers.\"\"\" import thelper.concepts from thelper.infer.base import Tester from", "\"\"\"Session runner specialized for testing of object detection task with safeguard against model", "\"\"\" @thelper.concepts.detection class ObjDetectTester(ObjDetectTrainer, Tester): \"\"\"Session runner specialized for testing of object detection", "for testing of regression task with safeguard against model training. .. seealso:: |", "safeguard against model training. .. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.regr.RegressionTrainer` \"\"\"", "ObjDetectTrainer from thelper.train.regr import RegressionTrainer from thelper.train.segm import ImageSegmTrainer @thelper.concepts.classification class ImageClassifTester(ImageClassifTrainer, Tester):", "for testing of image segmentation task with safeguard against model training. .. seealso::", "detection task with safeguard against model training. .. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer`", "| :class:`thelper.train.classif.ImageClassifTrainer` \"\"\" @thelper.concepts.detection class ObjDetectTester(ObjDetectTrainer, Tester): \"\"\"Session runner specialized for testing of", "testing of object detection task with safeguard against model training. .. 
seealso:: |", "from thelper.train.detect import ObjDetectTrainer from thelper.train.regr import RegressionTrainer from thelper.train.segm import ImageSegmTrainer @thelper.concepts.classification", "testing of image segmentation task with safeguard against model training. .. seealso:: |", "from thelper.train.regr import RegressionTrainer from thelper.train.segm import ImageSegmTrainer @thelper.concepts.classification class ImageClassifTester(ImageClassifTrainer, Tester): \"\"\"Session", "testing of image classification task with safeguard against model training. .. seealso:: |", "\"\"\"Session runner specialized for testing of image segmentation task with safeguard against model", "task with safeguard against model training. .. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` |", "with safeguard against model training. .. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.detect.ObjDetectTrainer`", "thelper.concepts from thelper.infer.base import Tester from thelper.train.classif import ImageClassifTrainer from thelper.train.detect import ObjDetectTrainer", "image classification task with safeguard against model training. .. seealso:: | :class:`thelper.train.base.Tester` |", "ImageClassifTrainer from thelper.train.detect import ObjDetectTrainer from thelper.train.regr import RegressionTrainer from thelper.train.segm import ImageSegmTrainer", ".. seealso:: | :class:`thelper.train.base.Tester` | :class:`thelper.train.base.Trainer` | :class:`thelper.train.detect.ObjDetectTrainer` \"\"\" @thelper.concepts.regression class RegressionTester(RegressionTrainer, Tester):", "testing of regression task with safeguard against model training. .. 
seealso:: | :class:`thelper.train.base.Tester`", "ImageSegmTrainer @thelper.concepts.classification class ImageClassifTester(ImageClassifTrainer, Tester): \"\"\"Session runner specialized for testing of image classification", "runner specialized for testing of image segmentation task with safeguard against model training." ]
[ "] # Connect to the database user = os.environ.get('DB_USER') password = <PASSWORD>('DB_PASSWORD') host", "'\\'2019-03-14 12:42:34.846-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.409069-07\\'', 'app_ids': app_data[-1:] }, { 'id': 2, 'name': '\\'pkg:npm/d3@5\\'',", "result = None cls.connection = psycopg2.connect(user=USER, password=PASSWORD, host=HOST, port=PORT, database=DATABASE) f = open(\"tests/provision_db.sql\",", "12:14:25.729-08\\'', 'retrieved': '\\'2020-04-25 19:03:37.434285-07\\'', 'app_ids': app_data[:1] } ] # Connect to the database", "'2019-03-14')) self.assertEqual(metadata[1], ('d3@5', 'https://npmjs.com/package/d3', '2020-04-20')) self.assertEqual(metadata[2], ('globe.gl@2', 'https://npmjs.com/package/globe.gl', '2020-04-10')) self.assertEqual(metadata[3], ('react-resize-detector@4', 'https://npmjs.com/package/react-resize-detector', None))", "write_similarity_scores class TestModel(unittest.TestCase): @classmethod def setUpClass(cls): USER = os.environ['DB_USER'] PASSWORD = <PASSWORD>['<PASSWORD>'] DATABASE", "tearDown(self): # Clean out all data self.cursor.execute(\"DELETE FROM dependencies; DELETE FROM similarity; DELETE", "trend self.cursor.execute(\"SELECT absolute_trend FROM packages ORDER by id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(10,),", "self.assertListEqual(scores, [(10,), (5,), (6,), (1,), (5,)]) # Check relative trend self.cursor.execute(\"SELECT relative_trend FROM", "FROM packages ORDER BY id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(10,), (2,), (2,), (1,),", "'modified': '\\'2020-04-10 14:13:59.518-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.426579-07\\'', 'app_ids': app_data }, { 'id': 5, 'name':", "{ 'id': 4, 'name': '\\'pkg:npm/globe.gl@2\\'', 'monthly_downloads_last_month': 2221, 'monthly_downloads_a_year_ago': 771, 'categories': '\\'{webgl,three,globe,geo,spherical,projection,orthographic}\\'', 'modified': '\\'2020-04-10", "4 (5,), (10,), (10,), (5,), # Package 5 (5, ), 
(5, ), (5,)])", "), (5,)]) # Package 8 def test_update_popularity_scores(self): update_popularity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_popularity FROM packages ORDER", "<PASSWORD>['<PASSWORD>'] DATABASE = os.environ['DB_DATABASE'] REAL_TOKEN = os.environ['GH_TOKEN'] HOST = os.environ['DB_HOST'] PORT = os.environ['DB_PORT']", "<PASSWORD>('DB_PASSWORD') host = os.environ.get('DB_HOST') database = os.environ.get('DB_DATABASE') port = os.environ.get('DB_PORT') connection_string = f\"host={host}", "# Check relative trend self.cursor.execute(\"SELECT relative_trend FROM packages ORDER BY id;\") scores =", "packages ORDER BY id;\") metadata = self.cursor.fetchall() self.assertEqual(metadata[0], ('countup.js@2', 'https://npmjs.com/package/countup.js', '2019-03-14')) self.assertEqual(metadata[1], ('d3@5',", "'2020-04-20')) self.assertEqual(metadata[2], ('globe.gl@2', 'https://npmjs.com/package/globe.gl', '2020-04-10')) self.assertEqual(metadata[3], ('react-resize-detector@4', 'https://npmjs.com/package/react-resize-detector', None)) self.assertEqual(metadata[4], ('@reach/router@1', 'https://npmjs.com/package/@reach/router', '2020-02-27'))", "test_update_trending_scores(self): update_trending_scores(self.cursor) # Check absolute trend self.cursor.execute(\"SELECT absolute_trend FROM packages ORDER by id;\")", "continue # Insert similarity score into database self.cursor.execute(f\"\"\" INSERT INTO similarity (package_a, package_b,", "), (5, ), # Package 1 (5,), (10,), (10,), (5,), # Package 2", "self.cursor.execute(f\"\"\" INSERT INTO packages (id, name, monthly_downloads_last_month, monthly_downloads_a_year_ago, categories, modified, retrieved ) VALUES", "app_data }, { 'id': 4, 'name': '\\'pkg:npm/globe.gl@2\\'', 'monthly_downloads_last_month': 2221, 'monthly_downloads_a_year_ago': 771, 'categories': '\\'{webgl,three,globe,geo,spherical,projection,orthographic}\\'',", "set(p2['app_ids'])) / len(set(p1['app_ids']) | set(p2['app_ids'])) if similarity == 0: 
continue # Insert similarity", "how much overlap the two packages have similarity = len(set(p1['app_ids']) & set(p2['app_ids'])) /", "id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(10,), (2,), (2,), (1,), (1,)]) def test_package_table_postprocessing(self): package_table_postprocessing(self.cursor)", "4, 'name': '\\'pkg:npm/globe.gl@2\\'', 'monthly_downloads_last_month': 2221, 'monthly_downloads_a_year_ago': 771, 'categories': '\\'{webgl,three,globe,geo,spherical,projection,orthographic}\\'', 'modified': '\\'2020-04-10 14:13:59.518-07\\'', 'retrieved':", "'monthly_downloads_a_year_ago': 0, 'categories': 'null', 'modified': '\\'2019-03-14 12:42:34.846-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.409069-07\\'', 'app_ids': app_data[-1:] },", "& set(p2['app_ids'])) / len(set(p1['app_ids']) | set(p2['app_ids'])) if similarity == 0: continue # Insert", "self.assertListEqual(scores, [(8,), (10,), (10,), (10,), (8,)]) def test_update_trending_scores(self): update_trending_scores(self.cursor) # Check absolute trend", "(id, name, monthly_downloads_last_month, monthly_downloads_a_year_ago, categories, modified, retrieved ) VALUES ({p['id']}, {p['name']}, {p['monthly_downloads_last_month']}, {p['monthly_downloads_a_year_ago']},", "}, { 'id': 5, 'name': '\\'pkg:npm/react-resize-detector@4\\'', 'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago': 1957316, 'categories': '\\'{react,resize,detector}\\'', 'modified':", "| set(p2['app_ids'])) if similarity == 0: continue # Insert similarity score into database", "psycopg2.connect(connection_string) self.cursor = self.database.cursor() # Populate with package data for p in package_data:", "USER = os.environ['DB_USER'] PASSWORD = <PASSWORD>['<PASSWORD>'] DATABASE = os.environ['DB_DATABASE'] REAL_TOKEN = os.environ['GH_TOKEN'] HOST", "package data for p in package_data: self.cursor.execute(f\"\"\" INSERT INTO packages (id, name, monthly_downloads_last_month,", "test_update_popularity_scores(self): 
update_popularity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_popularity FROM packages ORDER BY id;\") scores = self.cursor.fetchall() self.assertListEqual(scores,", "(5,), # Package 5 (5, ), (5, ), (5,)]) # Package 8 def", "'app_ids': app_data }, { 'id': 8, 'name': '\\'pkg:npm/@reach/router@1\\'', 'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago': 0, 'categories':", "packages ORDER BY id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(10,), (2,), (2,), (1,), (1,)])", "'categories': '\\'{dom,visualization,svg,animation,canvas}\\'', 'modified': '\\'2020-04-20 10:59:10.332-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.421523-07\\'', 'app_ids': app_data }, { 'id':", "'\\'2020-02-27 12:14:25.729-08\\'', 'retrieved': '\\'2020-04-25 19:03:37.434285-07\\'', 'app_ids': app_data[:1] } ] # Connect to the", "'monthly_downloads_last_month': 400451, 'monthly_downloads_a_year_ago': 0, 'categories': 'null', 'modified': '\\'2019-03-14 12:42:34.846-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.409069-07\\'', 'app_ids':", "'\\'pkg:npm/@reach/router@1\\'', 'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago': 0, 'categories': '\\'{react,\"react router\"}\\'', 'modified': '\\'2020-02-27 12:14:25.729-08\\'', 'retrieved': '\\'2020-04-25", "= self.cursor.fetchall() self.assertListEqual(scores, [(10,), (2,), (2,), (1,), (1,)]) def test_package_table_postprocessing(self): package_table_postprocessing(self.cursor) self.cursor.execute(\"SELECT short_name,", "1, 2 ] # Package data package_data = [ { 'id': 1, 'name':", "import unittest import psycopg2 from model.database import update_bounded_similarity_scores, \\ update_popularity_scores, update_trending_scores, \\ package_table_postprocessing,", "app_data = [ 1, 2 ] # Package data package_data = [ {", "('@reach/router@1', 'https://npmjs.com/package/@reach/router', '2020-02-27')) @classmethod def tearDownClass(cls): #closing and cleaning up the test database", "url, display_date FROM 
packages ORDER BY id;\") metadata = self.cursor.fetchall() self.assertEqual(metadata[0], ('countup.js@2', 'https://npmjs.com/package/countup.js',", "'monthly_downloads_last_month': 2221, 'monthly_downloads_a_year_ago': 771, 'categories': '\\'{webgl,three,globe,geo,spherical,projection,orthographic}\\'', 'modified': '\\'2020-04-10 14:13:59.518-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.426579-07\\'', 'app_ids':", "'\\'pkg:npm/countup.js@2\\'', 'monthly_downloads_last_month': 400451, 'monthly_downloads_a_year_ago': 0, 'categories': 'null', 'modified': '\\'2019-03-14 12:42:34.846-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.409069-07\\'',", "metadata = self.cursor.fetchall() self.assertEqual(metadata[0], ('countup.js@2', 'https://npmjs.com/package/countup.js', '2019-03-14')) self.assertEqual(metadata[1], ('d3@5', 'https://npmjs.com/package/d3', '2020-04-20')) self.assertEqual(metadata[2], ('globe.gl@2',", "f.close() \"\"\" Tests for the ML pipeline model \"\"\" def setUp(self): # Application", "package_b;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [ (5,), (5, ), (5, ), # Package", "BY id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(10,), (2,), (2,), (1,), (1,)]) def test_package_table_postprocessing(self):", "psycopg2 from model.database import update_bounded_similarity_scores, \\ update_popularity_scores, update_trending_scores, \\ package_table_postprocessing, write_similarity_scores class TestModel(unittest.TestCase):", "# Populate with package data for p in package_data: self.cursor.execute(f\"\"\" INSERT INTO packages", "os.environ.get('DB_HOST') database = os.environ.get('DB_DATABASE') port = os.environ.get('DB_PORT') connection_string = f\"host={host} user={user} password={password} dbname={database}", "= <PASSWORD>['<PASSWORD>'] DATABASE = os.environ['DB_DATABASE'] REAL_TOKEN = os.environ['GH_TOKEN'] HOST = os.environ['DB_HOST'] PORT =", "'modified': '\\'2019-03-14 12:42:34.846-07\\'', 'retrieved': '\\'2020-04-25 
19:03:37.409069-07\\'', 'app_ids': app_data[-1:] }, { 'id': 2, 'name':", "package_table_postprocessing(self.cursor) self.cursor.execute(\"SELECT short_name, url, display_date FROM packages ORDER BY id;\") metadata = self.cursor.fetchall()", "def setUpClass(cls): USER = os.environ['DB_USER'] PASSWORD = <PASSWORD>['<PASSWORD>'] DATABASE = os.environ['DB_DATABASE'] REAL_TOKEN =", "ORDER by id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(10,), (5,), (6,), (1,), (5,)]) #", "FROM packages ORDER by id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(10,), (5,), (6,), (1,),", "'\\'pkg:npm/react-resize-detector@4\\'', 'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago': 1957316, 'categories': '\\'{react,resize,detector}\\'', 'modified': 'null', 'retrieved': '\\'2020-04-25 19:03:37.429703-07\\'', 'app_ids':", "self.cursor.fetchall() self.assertListEqual(scores, [(8,), (10,), (10,), (10,), (8,)]) def test_update_trending_scores(self): update_trending_scores(self.cursor) # Check absolute", "host = os.environ.get('DB_HOST') database = os.environ.get('DB_DATABASE') port = os.environ.get('DB_PORT') connection_string = f\"host={host} user={user}", "5, 'name': '\\'pkg:npm/react-resize-detector@4\\'', 'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago': 1957316, 'categories': '\\'{react,resize,detector}\\'', 'modified': 'null', 'retrieved': '\\'2020-04-25", "{ 'id': 5, 'name': '\\'pkg:npm/react-resize-detector@4\\'', 'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago': 1957316, 'categories': '\\'{react,resize,detector}\\'', 'modified': 'null',", "f\"host={host} user={user} password={password} dbname={database} port={port}\" self.database = psycopg2.connect(connection_string) self.cursor = self.database.cursor() # Populate", "10:59:10.332-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.421523-07\\'', 'app_ids': app_data }, { 'id': 4, 'name': '\\'pkg:npm/globe.gl@2\\'', 'monthly_downloads_last_month':", "= 
os.environ['DB_DATABASE'] REAL_TOKEN = os.environ['GH_TOKEN'] HOST = os.environ['DB_HOST'] PORT = os.environ['DB_PORT'] connection =", "(5,), (10,), (10,), (5,), # Package 2 (5,), (10,), (10,), (5,), # Package", "# Package 4 (5,), (10,), (10,), (5,), # Package 5 (5, ), (5,", "unittest import psycopg2 from model.database import update_bounded_similarity_scores, \\ update_popularity_scores, update_trending_scores, \\ package_table_postprocessing, write_similarity_scores", "FROM similarity; DELETE FROM applications; DELETE FROM packages;\") self.database.commit() self.cursor.close() self.database.close() def test_update_bounded_similarity_scores(self):", "2 ] # Package data package_data = [ { 'id': 1, 'name': '\\'pkg:npm/countup.js@2\\'',", "package_b, similarity) VALUES ({p1['id']}, {p2['id']}, {similarity}); \"\"\") def tearDown(self): # Clean out all", "'https://npmjs.com/package/@reach/router', '2020-02-27')) @classmethod def tearDownClass(cls): #closing and cleaning up the test database if", "'modified': '\\'2020-04-20 10:59:10.332-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.421523-07\\'', 'app_ids': app_data }, { 'id': 4, 'name':", "id;\") metadata = self.cursor.fetchall() self.assertEqual(metadata[0], ('countup.js@2', 'https://npmjs.com/package/countup.js', '2019-03-14')) self.assertEqual(metadata[1], ('d3@5', 'https://npmjs.com/package/d3', '2020-04-20')) self.assertEqual(metadata[2],", "{ 'id': 1, 'name': '\\'pkg:npm/countup.js@2\\'', 'monthly_downloads_last_month': 400451, 'monthly_downloads_a_year_ago': 0, 'categories': 'null', 'modified': '\\'2019-03-14", "{ 'id': 2, 'name': '\\'pkg:npm/d3@5\\'', 'monthly_downloads_last_month': 5306004, 'monthly_downloads_a_year_ago': 2966818, 'categories': '\\'{dom,visualization,svg,animation,canvas}\\'', 'modified': '\\'2020-04-20", "# Package 8 def test_update_popularity_scores(self): update_popularity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_popularity FROM packages ORDER BY id;\")", 
"self.assertEqual(metadata[0], ('countup.js@2', 'https://npmjs.com/package/countup.js', '2019-03-14')) self.assertEqual(metadata[1], ('d3@5', 'https://npmjs.com/package/d3', '2020-04-20')) self.assertEqual(metadata[2], ('globe.gl@2', 'https://npmjs.com/package/globe.gl', '2020-04-10')) self.assertEqual(metadata[3],", "(5,), # Package 4 (5,), (10,), (10,), (5,), # Package 5 (5, ),", "by id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(10,), (5,), (6,), (1,), (5,)]) # Check", "'\\'pkg:npm/globe.gl@2\\'', 'monthly_downloads_last_month': 2221, 'monthly_downloads_a_year_ago': 771, 'categories': '\\'{webgl,three,globe,geo,spherical,projection,orthographic}\\'', 'modified': '\\'2020-04-10 14:13:59.518-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.426579-07\\'',", "# Connect to the database user = os.environ.get('DB_USER') password = <PASSWORD>('DB_PASSWORD') host =", "trend self.cursor.execute(\"SELECT relative_trend FROM packages ORDER BY id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(10,),", "'\\'2020-04-25 19:03:37.409069-07\\'', 'app_ids': app_data[-1:] }, { 'id': 2, 'name': '\\'pkg:npm/d3@5\\'', 'monthly_downloads_last_month': 5306004, 'monthly_downloads_a_year_ago':", "def test_package_table_postprocessing(self): package_table_postprocessing(self.cursor) self.cursor.execute(\"SELECT short_name, url, display_date FROM packages ORDER BY id;\") metadata", "= os.environ['DB_USER'] PASSWORD = <PASSWORD>['<PASSWORD>'] DATABASE = os.environ['DB_DATABASE'] REAL_TOKEN = os.environ['GH_TOKEN'] HOST =", "for p1 in package_data: for p2 in package_data: if p1['id'] == p2['id']: continue", "database user = os.environ.get('DB_USER') password = <PASSWORD>('DB_PASSWORD') host = os.environ.get('DB_HOST') database = os.environ.get('DB_DATABASE')", "model \"\"\" import os import unittest import psycopg2 from model.database import update_bounded_similarity_scores, \\", "= [ 1, 2 ] # Package data package_data = [ { 'id':", "# Package data 
package_data = [ { 'id': 1, 'name': '\\'pkg:npm/countup.js@2\\'', 'monthly_downloads_last_month': 400451,", "'\\'{react,resize,detector}\\'', 'modified': 'null', 'retrieved': '\\'2020-04-25 19:03:37.429703-07\\'', 'app_ids': app_data }, { 'id': 8, 'name':", "monthly_downloads_a_year_ago, categories, modified, retrieved ) VALUES ({p['id']}, {p['name']}, {p['monthly_downloads_last_month']}, {p['monthly_downloads_a_year_ago']}, {p['categories']}, {p['modified']}, {p['retrieved']});", "self.assertEqual(metadata[2], ('globe.gl@2', 'https://npmjs.com/package/globe.gl', '2020-04-10')) self.assertEqual(metadata[3], ('react-resize-detector@4', 'https://npmjs.com/package/react-resize-detector', None)) self.assertEqual(metadata[4], ('@reach/router@1', 'https://npmjs.com/package/@reach/router', '2020-02-27')) @classmethod", "== p2['id']: continue # Determine how much overlap the two packages have similarity", "open(\"tests/provision_db.sql\", \"r\") cls.connection.cursor().execute(f.read()) cls.connection.commit() f.close() \"\"\" Tests for the ML pipeline model \"\"\"", "two packages have similarity = len(set(p1['app_ids']) & set(p2['app_ids'])) / len(set(p1['app_ids']) | set(p2['app_ids'])) if", "(10,), (10,), (8,)]) def test_update_trending_scores(self): update_trending_scores(self.cursor) # Check absolute trend self.cursor.execute(\"SELECT absolute_trend FROM", "14:13:59.518-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.426579-07\\'', 'app_ids': app_data }, { 'id': 5, 'name': '\\'pkg:npm/react-resize-detector@4\\'', 'monthly_downloads_last_month':", "self.database.commit() self.cursor.close() self.database.close() def test_update_bounded_similarity_scores(self): update_bounded_similarity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_similarity FROM similarity ORDER BY package_a,", "(5,), (10,), (10,), (5,), # Package 5 (5, ), (5, ), (5,)]) #", "'retrieved': '\\'2020-04-25 19:03:37.429703-07\\'', 'app_ids': app_data }, { 'id': 8, 'name': 
'\\'pkg:npm/@reach/router@1\\'', 'monthly_downloads_last_month': 0,", "import update_bounded_similarity_scores, \\ update_popularity_scores, update_trending_scores, \\ package_table_postprocessing, write_similarity_scores class TestModel(unittest.TestCase): @classmethod def setUpClass(cls):", "the two packages have similarity = len(set(p1['app_ids']) & set(p2['app_ids'])) / len(set(p1['app_ids']) | set(p2['app_ids']))", "#closing and cleaning up the test database if cls.connection: f = open(\"tests/deprovision_db.sql\", \"r\")", "in package_data: for p2 in package_data: if p1['id'] == p2['id']: continue # Determine", "connection_string = f\"host={host} user={user} password={password} dbname={database} port={port}\" self.database = psycopg2.connect(connection_string) self.cursor = self.database.cursor()", "scores = self.cursor.fetchall() self.assertListEqual(scores, [(10,), (2,), (2,), (1,), (1,)]) def test_package_table_postprocessing(self): package_table_postprocessing(self.cursor) self.cursor.execute(\"SELECT", "'name': '\\'pkg:npm/@reach/router@1\\'', 'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago': 0, 'categories': '\\'{react,\"react router\"}\\'', 'modified': '\\'2020-02-27 12:14:25.729-08\\'', 'retrieved':", "19:03:37.421523-07\\'', 'app_ids': app_data }, { 'id': 4, 'name': '\\'pkg:npm/globe.gl@2\\'', 'monthly_downloads_last_month': 2221, 'monthly_downloads_a_year_ago': 771,", "if cls.connection: f = open(\"tests/deprovision_db.sql\", \"r\") cls.connection.cursor().execute(f.read()) cls.connection.commit() cls.connection.close() print(\"PostgreSQL connection is closed", "# Determine how much overlap the two packages have similarity = len(set(p1['app_ids']) &", "'modified': '\\'2020-02-27 12:14:25.729-08\\'', 'retrieved': '\\'2020-04-25 19:03:37.434285-07\\'', 'app_ids': app_data[:1] } ] # Connect to", "(10,), (8,)]) def test_update_trending_scores(self): update_trending_scores(self.cursor) # Check absolute trend self.cursor.execute(\"SELECT 
absolute_trend FROM packages", "packages ORDER by id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(10,), (5,), (6,), (1,), (5,)])", "all data self.cursor.execute(\"DELETE FROM dependencies; DELETE FROM similarity; DELETE FROM applications; DELETE FROM", "p1 in package_data: for p2 in package_data: if p1['id'] == p2['id']: continue #", "password={password} dbname={database} port={port}\" self.database = psycopg2.connect(connection_string) self.cursor = self.database.cursor() # Populate with package", "'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago': 0, 'categories': '\\'{react,\"react router\"}\\'', 'modified': '\\'2020-02-27 12:14:25.729-08\\'', 'retrieved': '\\'2020-04-25 19:03:37.434285-07\\'',", "psycopg2.connect(user=USER, password=PASSWORD, host=HOST, port=PORT, database=DATABASE) f = open(\"tests/provision_db.sql\", \"r\") cls.connection.cursor().execute(f.read()) cls.connection.commit() f.close() \"\"\"", "\"\"\") def tearDown(self): # Clean out all data self.cursor.execute(\"DELETE FROM dependencies; DELETE FROM", "with package data for p in package_data: self.cursor.execute(f\"\"\" INSERT INTO packages (id, name,", "os.environ['DB_DATABASE'] REAL_TOKEN = os.environ['GH_TOKEN'] HOST = os.environ['DB_HOST'] PORT = os.environ['DB_PORT'] connection = None", "('countup.js@2', 'https://npmjs.com/package/countup.js', '2019-03-14')) self.assertEqual(metadata[1], ('d3@5', 'https://npmjs.com/package/d3', '2020-04-20')) self.assertEqual(metadata[2], ('globe.gl@2', 'https://npmjs.com/package/globe.gl', '2020-04-10')) self.assertEqual(metadata[3], ('react-resize-detector@4',", "'id': 8, 'name': '\\'pkg:npm/@reach/router@1\\'', 'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago': 0, 'categories': '\\'{react,\"react router\"}\\'', 'modified': '\\'2020-02-27", "'monthly_downloads_a_year_ago': 2966818, 'categories': '\\'{dom,visualization,svg,animation,canvas}\\'', 'modified': '\\'2020-04-20 10:59:10.332-07\\'', 
'retrieved': '\\'2020-04-25 19:03:37.421523-07\\'', 'app_ids': app_data },", "'categories': '\\'{webgl,three,globe,geo,spherical,projection,orthographic}\\'', 'modified': '\\'2020-04-10 14:13:59.518-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.426579-07\\'', 'app_ids': app_data }, { 'id':", "(1,), (1,)]) def test_package_table_postprocessing(self): package_table_postprocessing(self.cursor) self.cursor.execute(\"SELECT short_name, url, display_date FROM packages ORDER BY", "data for p1 in package_data: for p2 in package_data: if p1['id'] == p2['id']:", "(2,), (1,), (1,)]) def test_package_table_postprocessing(self): package_table_postprocessing(self.cursor) self.cursor.execute(\"SELECT short_name, url, display_date FROM packages ORDER", "'https://npmjs.com/package/d3', '2020-04-20')) self.assertEqual(metadata[2], ('globe.gl@2', 'https://npmjs.com/package/globe.gl', '2020-04-10')) self.assertEqual(metadata[3], ('react-resize-detector@4', 'https://npmjs.com/package/react-resize-detector', None)) self.assertEqual(metadata[4], ('@reach/router@1', 'https://npmjs.com/package/@reach/router',", "def tearDown(self): # Clean out all data self.cursor.execute(\"DELETE FROM dependencies; DELETE FROM similarity;", "Package 2 (5,), (10,), (10,), (5,), # Package 4 (5,), (10,), (10,), (5,),", "data app_data = [ 1, 2 ] # Package data package_data = [", "= self.cursor.fetchall() self.assertListEqual(scores, [(8,), (10,), (10,), (10,), (8,)]) def test_update_trending_scores(self): update_trending_scores(self.cursor) # Check", "self.assertListEqual(scores, [(10,), (2,), (2,), (1,), (1,)]) def test_package_table_postprocessing(self): package_table_postprocessing(self.cursor) self.cursor.execute(\"SELECT short_name, url, display_date", "display_date FROM packages ORDER BY id;\") metadata = self.cursor.fetchall() self.assertEqual(metadata[0], ('countup.js@2', 'https://npmjs.com/package/countup.js', '2019-03-14'))", "'name': '\\'pkg:npm/d3@5\\'', 'monthly_downloads_last_month': 5306004, 
'monthly_downloads_a_year_ago': 2966818, 'categories': '\\'{dom,visualization,svg,animation,canvas}\\'', 'modified': '\\'2020-04-20 10:59:10.332-07\\'', 'retrieved': '\\'2020-04-25", "package_data: for p2 in package_data: if p1['id'] == p2['id']: continue # Determine how", "# Package 2 (5,), (10,), (10,), (5,), # Package 4 (5,), (10,), (10,),", "(10,), (10,), (5,), # Package 4 (5,), (10,), (10,), (5,), # Package 5", "os.environ['GH_TOKEN'] HOST = os.environ['DB_HOST'] PORT = os.environ['DB_PORT'] connection = None result = None", "= os.environ['DB_PORT'] connection = None result = None cls.connection = psycopg2.connect(user=USER, password=PASSWORD, host=HOST,", "# Check absolute trend self.cursor.execute(\"SELECT absolute_trend FROM packages ORDER by id;\") scores =", "similarity (package_a, package_b, similarity) VALUES ({p1['id']}, {p2['id']}, {similarity}); \"\"\") def tearDown(self): # Clean", "user={user} password={password} dbname={database} port={port}\" self.database = psycopg2.connect(connection_string) self.cursor = self.database.cursor() # Populate with", "overlap the two packages have similarity = len(set(p1['app_ids']) & set(p2['app_ids'])) / len(set(p1['app_ids']) |", "cls.connection.cursor().execute(f.read()) cls.connection.commit() f.close() \"\"\" Tests for the ML pipeline model \"\"\" def setUp(self):", "the ML pipeline model \"\"\" def setUp(self): # Application data app_data = [", "set(p2['app_ids'])) if similarity == 0: continue # Insert similarity score into database self.cursor.execute(f\"\"\"", "BY id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(8,), (10,), (10,), (10,), (8,)]) def test_update_trending_scores(self):", "1957316, 'categories': '\\'{react,resize,detector}\\'', 'modified': 'null', 'retrieved': '\\'2020-04-25 19:03:37.429703-07\\'', 'app_ids': app_data }, { 'id':", "(8,)]) def test_update_trending_scores(self): update_trending_scores(self.cursor) # Check absolute trend self.cursor.execute(\"SELECT 
absolute_trend FROM packages ORDER", "VALUES ({p['id']}, {p['name']}, {p['monthly_downloads_last_month']}, {p['monthly_downloads_a_year_ago']}, {p['categories']}, {p['modified']}, {p['retrieved']}); \"\"\") # Populate with similarity", "similarity; DELETE FROM applications; DELETE FROM packages;\") self.database.commit() self.cursor.close() self.database.close() def test_update_bounded_similarity_scores(self): update_bounded_similarity_scores(self.cursor)", "app_data }, { 'id': 5, 'name': '\\'pkg:npm/react-resize-detector@4\\'', 'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago': 1957316, 'categories': '\\'{react,resize,detector}\\'',", "'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago': 1957316, 'categories': '\\'{react,resize,detector}\\'', 'modified': 'null', 'retrieved': '\\'2020-04-25 19:03:37.429703-07\\'', 'app_ids': app_data", "self.cursor.execute(\"SELECT bounded_similarity FROM similarity ORDER BY package_a, package_b;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [", "pipeline model \"\"\" import os import unittest import psycopg2 from model.database import update_bounded_similarity_scores,", "for p in package_data: self.cursor.execute(f\"\"\" INSERT INTO packages (id, name, monthly_downloads_last_month, monthly_downloads_a_year_ago, categories,", "= len(set(p1['app_ids']) & set(p2['app_ids'])) / len(set(p1['app_ids']) | set(p2['app_ids'])) if similarity == 0: continue", "\"\"\") # Populate with similarity data for p1 in package_data: for p2 in", "cls.connection = psycopg2.connect(user=USER, password=PASSWORD, host=HOST, port=PORT, database=DATABASE) f = open(\"tests/provision_db.sql\", \"r\") cls.connection.cursor().execute(f.read()) cls.connection.commit()", "Tests for the pipeline model \"\"\" import os import unittest import psycopg2 from", "p in package_data: self.cursor.execute(f\"\"\" INSERT INTO packages (id, name, monthly_downloads_last_month, monthly_downloads_a_year_ago, categories, modified,", 
"similarity) VALUES ({p1['id']}, {p2['id']}, {similarity}); \"\"\") def tearDown(self): # Clean out all data", "None result = None cls.connection = psycopg2.connect(user=USER, password=PASSWORD, host=HOST, port=PORT, database=DATABASE) f =", "test_package_table_postprocessing(self): package_table_postprocessing(self.cursor) self.cursor.execute(\"SELECT short_name, url, display_date FROM packages ORDER BY id;\") metadata =", "def setUp(self): # Application data app_data = [ 1, 2 ] # Package", "None)) self.assertEqual(metadata[4], ('@reach/router@1', 'https://npmjs.com/package/@reach/router', '2020-02-27')) @classmethod def tearDownClass(cls): #closing and cleaning up the", "# Populate with similarity data for p1 in package_data: for p2 in package_data:", "database self.cursor.execute(f\"\"\" INSERT INTO similarity (package_a, package_b, similarity) VALUES ({p1['id']}, {p2['id']}, {similarity}); \"\"\")", "out all data self.cursor.execute(\"DELETE FROM dependencies; DELETE FROM similarity; DELETE FROM applications; DELETE", "os.environ.get('DB_PORT') connection_string = f\"host={host} user={user} password={password} dbname={database} port={port}\" self.database = psycopg2.connect(connection_string) self.cursor =", "[ 1, 2 ] # Package data package_data = [ { 'id': 1,", "\"\"\" Tests for the pipeline model \"\"\" import os import unittest import psycopg2", "'retrieved': '\\'2020-04-25 19:03:37.426579-07\\'', 'app_ids': app_data }, { 'id': 5, 'name': '\\'pkg:npm/react-resize-detector@4\\'', 'monthly_downloads_last_month': 0,", "setUpClass(cls): USER = os.environ['DB_USER'] PASSWORD = <PASSWORD>['<PASSWORD>'] DATABASE = os.environ['DB_DATABASE'] REAL_TOKEN = os.environ['GH_TOKEN']", "os.environ['DB_HOST'] PORT = os.environ['DB_PORT'] connection = None result = None cls.connection = psycopg2.connect(user=USER,", "import os import unittest import psycopg2 from model.database import update_bounded_similarity_scores, \\ update_popularity_scores, update_trending_scores,", 
"FROM packages ORDER BY id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(8,), (10,), (10,), (10,),", "INSERT INTO packages (id, name, monthly_downloads_last_month, monthly_downloads_a_year_ago, categories, modified, retrieved ) VALUES ({p['id']},", "relative trend self.cursor.execute(\"SELECT relative_trend FROM packages ORDER BY id;\") scores = self.cursor.fetchall() self.assertListEqual(scores,", "FROM dependencies; DELETE FROM similarity; DELETE FROM applications; DELETE FROM packages;\") self.database.commit() self.cursor.close()", "'categories': 'null', 'modified': '\\'2019-03-14 12:42:34.846-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.409069-07\\'', 'app_ids': app_data[-1:] }, { 'id':", "{p2['id']}, {similarity}); \"\"\") def tearDown(self): # Clean out all data self.cursor.execute(\"DELETE FROM dependencies;", "400451, 'monthly_downloads_a_year_ago': 0, 'categories': 'null', 'modified': '\\'2019-03-14 12:42:34.846-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.409069-07\\'', 'app_ids': app_data[-1:]", "'retrieved': '\\'2020-04-25 19:03:37.434285-07\\'', 'app_ids': app_data[:1] } ] # Connect to the database user", "bounded_similarity FROM similarity ORDER BY package_a, package_b;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [ (5,),", "retrieved ) VALUES ({p['id']}, {p['name']}, {p['monthly_downloads_last_month']}, {p['monthly_downloads_a_year_ago']}, {p['categories']}, {p['modified']}, {p['retrieved']}); \"\"\") # Populate", "INSERT INTO similarity (package_a, package_b, similarity) VALUES ({p1['id']}, {p2['id']}, {similarity}); \"\"\") def tearDown(self):", "{p['monthly_downloads_a_year_ago']}, {p['categories']}, {p['modified']}, {p['retrieved']}); \"\"\") # Populate with similarity data for p1 in", "'\\'{webgl,three,globe,geo,spherical,projection,orthographic}\\'', 'modified': '\\'2020-04-10 14:13:59.518-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.426579-07\\'', 'app_ids': app_data }, { 'id': 5,", "\"\"\" def 
setUp(self): # Application data app_data = [ 1, 2 ] #", "(5,), (6,), (1,), (5,)]) # Check relative trend self.cursor.execute(\"SELECT relative_trend FROM packages ORDER", "p1['id'] == p2['id']: continue # Determine how much overlap the two packages have", "5306004, 'monthly_downloads_a_year_ago': 2966818, 'categories': '\\'{dom,visualization,svg,animation,canvas}\\'', 'modified': '\\'2020-04-20 10:59:10.332-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.421523-07\\'', 'app_ids': app_data", "'app_ids': app_data[-1:] }, { 'id': 2, 'name': '\\'pkg:npm/d3@5\\'', 'monthly_downloads_last_month': 5306004, 'monthly_downloads_a_year_ago': 2966818, 'categories':", "Package 4 (5,), (10,), (10,), (5,), # Package 5 (5, ), (5, ),", "database=DATABASE) f = open(\"tests/provision_db.sql\", \"r\") cls.connection.cursor().execute(f.read()) cls.connection.commit() f.close() \"\"\" Tests for the ML", "cls.connection.commit() f.close() \"\"\" Tests for the ML pipeline model \"\"\" def setUp(self): #", "'2020-04-10')) self.assertEqual(metadata[3], ('react-resize-detector@4', 'https://npmjs.com/package/react-resize-detector', None)) self.assertEqual(metadata[4], ('@reach/router@1', 'https://npmjs.com/package/@reach/router', '2020-02-27')) @classmethod def tearDownClass(cls): #closing", "'https://npmjs.com/package/countup.js', '2019-03-14')) self.assertEqual(metadata[1], ('d3@5', 'https://npmjs.com/package/d3', '2020-04-20')) self.assertEqual(metadata[2], ('globe.gl@2', 'https://npmjs.com/package/globe.gl', '2020-04-10')) self.assertEqual(metadata[3], ('react-resize-detector@4', 'https://npmjs.com/package/react-resize-detector',", "Package data package_data = [ { 'id': 1, 'name': '\\'pkg:npm/countup.js@2\\'', 'monthly_downloads_last_month': 400451, 'monthly_downloads_a_year_ago':", "(10,), (5,), # Package 5 (5, ), (5, ), (5,)]) # Package 8", "pipeline model \"\"\" def setUp(self): # Application data app_data = [ 1, 2", "(10,), (10,), (10,), (8,)]) def test_update_trending_scores(self): 
update_trending_scores(self.cursor) # Check absolute trend self.cursor.execute(\"SELECT absolute_trend", "'name': '\\'pkg:npm/react-resize-detector@4\\'', 'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago': 1957316, 'categories': '\\'{react,resize,detector}\\'', 'modified': 'null', 'retrieved': '\\'2020-04-25 19:03:37.429703-07\\'',", "self.cursor.execute(\"SELECT short_name, url, display_date FROM packages ORDER BY id;\") metadata = self.cursor.fetchall() self.assertEqual(metadata[0],", "'\\'{react,\"react router\"}\\'', 'modified': '\\'2020-02-27 12:14:25.729-08\\'', 'retrieved': '\\'2020-04-25 19:03:37.434285-07\\'', 'app_ids': app_data[:1] } ] #", "data self.cursor.execute(\"DELETE FROM dependencies; DELETE FROM similarity; DELETE FROM applications; DELETE FROM packages;\")", "os.environ.get('DB_DATABASE') port = os.environ.get('DB_PORT') connection_string = f\"host={host} user={user} password={password} dbname={database} port={port}\" self.database =", "{similarity}); \"\"\") def tearDown(self): # Clean out all data self.cursor.execute(\"DELETE FROM dependencies; DELETE", "0, 'monthly_downloads_a_year_ago': 1957316, 'categories': '\\'{react,resize,detector}\\'', 'modified': 'null', 'retrieved': '\\'2020-04-25 19:03:37.429703-07\\'', 'app_ids': app_data },", "[(10,), (2,), (2,), (1,), (1,)]) def test_package_table_postprocessing(self): package_table_postprocessing(self.cursor) self.cursor.execute(\"SELECT short_name, url, display_date FROM", ") VALUES ({p['id']}, {p['name']}, {p['monthly_downloads_last_month']}, {p['monthly_downloads_a_year_ago']}, {p['categories']}, {p['modified']}, {p['retrieved']}); \"\"\") # Populate with", "for p2 in package_data: if p1['id'] == p2['id']: continue # Determine how much", "self.cursor.execute(\"SELECT bounded_popularity FROM packages ORDER BY id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(8,), (10,),", "Determine how much overlap the two packages have similarity = len(set(p1['app_ids']) 
& set(p2['app_ids']))", "len(set(p1['app_ids']) | set(p2['app_ids'])) if similarity == 0: continue # Insert similarity score into", "self.database = psycopg2.connect(connection_string) self.cursor = self.database.cursor() # Populate with package data for p", "port=PORT, database=DATABASE) f = open(\"tests/provision_db.sql\", \"r\") cls.connection.cursor().execute(f.read()) cls.connection.commit() f.close() \"\"\" Tests for the", "'monthly_downloads_a_year_ago': 771, 'categories': '\\'{webgl,three,globe,geo,spherical,projection,orthographic}\\'', 'modified': '\\'2020-04-10 14:13:59.518-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.426579-07\\'', 'app_ids': app_data },", "score into database self.cursor.execute(f\"\"\" INSERT INTO similarity (package_a, package_b, similarity) VALUES ({p1['id']}, {p2['id']},", "'monthly_downloads_a_year_ago': 0, 'categories': '\\'{react,\"react router\"}\\'', 'modified': '\\'2020-02-27 12:14:25.729-08\\'', 'retrieved': '\\'2020-04-25 19:03:37.434285-07\\'', 'app_ids': app_data[:1]", "Connect to the database user = os.environ.get('DB_USER') password = <PASSWORD>('DB_PASSWORD') host = os.environ.get('DB_HOST')", "FROM similarity ORDER BY package_a, package_b;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [ (5,), (5,", "self.assertEqual(metadata[4], ('@reach/router@1', 'https://npmjs.com/package/@reach/router', '2020-02-27')) @classmethod def tearDownClass(cls): #closing and cleaning up the test", "(1,), (5,)]) # Check relative trend self.cursor.execute(\"SELECT relative_trend FROM packages ORDER BY id;\")", "host=HOST, port=PORT, database=DATABASE) f = open(\"tests/provision_db.sql\", \"r\") cls.connection.cursor().execute(f.read()) cls.connection.commit() f.close() \"\"\" Tests for", "'id': 5, 'name': '\\'pkg:npm/react-resize-detector@4\\'', 'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago': 1957316, 'categories': '\\'{react,resize,detector}\\'', 'modified': 'null', 'retrieved':", 
"package_table_postprocessing, write_similarity_scores class TestModel(unittest.TestCase): @classmethod def setUpClass(cls): USER = os.environ['DB_USER'] PASSWORD = <PASSWORD>['<PASSWORD>']", "monthly_downloads_last_month, monthly_downloads_a_year_ago, categories, modified, retrieved ) VALUES ({p['id']}, {p['name']}, {p['monthly_downloads_last_month']}, {p['monthly_downloads_a_year_ago']}, {p['categories']}, {p['modified']},", "relative_trend FROM packages ORDER BY id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(10,), (2,), (2,),", "ORDER BY package_a, package_b;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [ (5,), (5, ), (5,", "= os.environ.get('DB_USER') password = <PASSWORD>('DB_PASSWORD') host = os.environ.get('DB_HOST') database = os.environ.get('DB_DATABASE') port =", "INTO similarity (package_a, package_b, similarity) VALUES ({p1['id']}, {p2['id']}, {similarity}); \"\"\") def tearDown(self): #", "= open(\"tests/provision_db.sql\", \"r\") cls.connection.cursor().execute(f.read()) cls.connection.commit() f.close() \"\"\" Tests for the ML pipeline model", "def test_update_trending_scores(self): update_trending_scores(self.cursor) # Check absolute trend self.cursor.execute(\"SELECT absolute_trend FROM packages ORDER by", "0, 'categories': 'null', 'modified': '\\'2019-03-14 12:42:34.846-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.409069-07\\'', 'app_ids': app_data[-1:] }, {", "p2 in package_data: if p1['id'] == p2['id']: continue # Determine how much overlap", "'id': 1, 'name': '\\'pkg:npm/countup.js@2\\'', 'monthly_downloads_last_month': 400451, 'monthly_downloads_a_year_ago': 0, 'categories': 'null', 'modified': '\\'2019-03-14 12:42:34.846-07\\'',", "port={port}\" self.database = psycopg2.connect(connection_string) self.cursor = self.database.cursor() # Populate with package data for", "DELETE FROM packages;\") self.database.commit() self.cursor.close() self.database.close() def 
test_update_bounded_similarity_scores(self): update_bounded_similarity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_similarity FROM similarity", "p2['id']: continue # Determine how much overlap the two packages have similarity =", "(5,)]) # Check relative trend self.cursor.execute(\"SELECT relative_trend FROM packages ORDER BY id;\") scores", "'null', 'retrieved': '\\'2020-04-25 19:03:37.429703-07\\'', 'app_ids': app_data }, { 'id': 8, 'name': '\\'pkg:npm/@reach/router@1\\'', 'monthly_downloads_last_month':", "import psycopg2 from model.database import update_bounded_similarity_scores, \\ update_popularity_scores, update_trending_scores, \\ package_table_postprocessing, write_similarity_scores class", "Check absolute trend self.cursor.execute(\"SELECT absolute_trend FROM packages ORDER by id;\") scores = self.cursor.fetchall()", "the pipeline model \"\"\" import os import unittest import psycopg2 from model.database import", "update_trending_scores, \\ package_table_postprocessing, write_similarity_scores class TestModel(unittest.TestCase): @classmethod def setUpClass(cls): USER = os.environ['DB_USER'] PASSWORD", "{p['modified']}, {p['retrieved']}); \"\"\") # Populate with similarity data for p1 in package_data: for", "self.cursor.fetchall() self.assertListEqual(scores, [(10,), (5,), (6,), (1,), (5,)]) # Check relative trend self.cursor.execute(\"SELECT relative_trend", "= None result = None cls.connection = psycopg2.connect(user=USER, password=PASSWORD, host=HOST, port=PORT, database=DATABASE) f", "in package_data: if p1['id'] == p2['id']: continue # Determine how much overlap the", "TestModel(unittest.TestCase): @classmethod def setUpClass(cls): USER = os.environ['DB_USER'] PASSWORD = <PASSWORD>['<PASSWORD>'] DATABASE = os.environ['DB_DATABASE']", "= psycopg2.connect(connection_string) self.cursor = self.database.cursor() # Populate with package data for p in", "(10,), (5,), # Package 2 (5,), (10,), (10,), (5,), # Package 4 (5,),", "for the pipeline 
model \"\"\" import os import unittest import psycopg2 from model.database", "(2,), (2,), (1,), (1,)]) def test_package_table_postprocessing(self): package_table_postprocessing(self.cursor) self.cursor.execute(\"SELECT short_name, url, display_date FROM packages", "update_trending_scores(self.cursor) # Check absolute trend self.cursor.execute(\"SELECT absolute_trend FROM packages ORDER by id;\") scores", "2, 'name': '\\'pkg:npm/d3@5\\'', 'monthly_downloads_last_month': 5306004, 'monthly_downloads_a_year_ago': 2966818, 'categories': '\\'{dom,visualization,svg,animation,canvas}\\'', 'modified': '\\'2020-04-20 10:59:10.332-07\\'', 'retrieved':", "# Package 5 (5, ), (5, ), (5,)]) # Package 8 def test_update_popularity_scores(self):", "user = os.environ.get('DB_USER') password = <PASSWORD>('DB_PASSWORD') host = os.environ.get('DB_HOST') database = os.environ.get('DB_DATABASE') port", "19:03:37.429703-07\\'', 'app_ids': app_data }, { 'id': 8, 'name': '\\'pkg:npm/@reach/router@1\\'', 'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago': 0,", "Application data app_data = [ 1, 2 ] # Package data package_data =", "[(8,), (10,), (10,), (10,), (8,)]) def test_update_trending_scores(self): update_trending_scores(self.cursor) # Check absolute trend self.cursor.execute(\"SELECT", "771, 'categories': '\\'{webgl,three,globe,geo,spherical,projection,orthographic}\\'', 'modified': '\\'2020-04-10 14:13:59.518-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.426579-07\\'', 'app_ids': app_data }, {", "'\\'{dom,visualization,svg,animation,canvas}\\'', 'modified': '\\'2020-04-20 10:59:10.332-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.421523-07\\'', 'app_ids': app_data }, { 'id': 4,", "'2020-02-27')) @classmethod def tearDownClass(cls): #closing and cleaning up the test database if cls.connection:", "BY id;\") metadata = self.cursor.fetchall() self.assertEqual(metadata[0], ('countup.js@2', 'https://npmjs.com/package/countup.js', '2019-03-14')) self.assertEqual(metadata[1], 
('d3@5', 'https://npmjs.com/package/d3', '2020-04-20'))", "({p['id']}, {p['name']}, {p['monthly_downloads_last_month']}, {p['monthly_downloads_a_year_ago']}, {p['categories']}, {p['modified']}, {p['retrieved']}); \"\"\") # Populate with similarity data", "setUp(self): # Application data app_data = [ 1, 2 ] # Package data", "scores = self.cursor.fetchall() self.assertListEqual(scores, [(8,), (10,), (10,), (10,), (8,)]) def test_update_trending_scores(self): update_trending_scores(self.cursor) #", "self.database.cursor() # Populate with package data for p in package_data: self.cursor.execute(f\"\"\" INSERT INTO", "package_data: self.cursor.execute(f\"\"\" INSERT INTO packages (id, name, monthly_downloads_last_month, monthly_downloads_a_year_ago, categories, modified, retrieved )", "'modified': 'null', 'retrieved': '\\'2020-04-25 19:03:37.429703-07\\'', 'app_ids': app_data }, { 'id': 8, 'name': '\\'pkg:npm/@reach/router@1\\'',", "'\\'2020-04-25 19:03:37.434285-07\\'', 'app_ids': app_data[:1] } ] # Connect to the database user =", "'monthly_downloads_last_month': 5306004, 'monthly_downloads_a_year_ago': 2966818, 'categories': '\\'{dom,visualization,svg,animation,canvas}\\'', 'modified': '\\'2020-04-20 10:59:10.332-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.421523-07\\'', 'app_ids':", "self.assertEqual(metadata[1], ('d3@5', 'https://npmjs.com/package/d3', '2020-04-20')) self.assertEqual(metadata[2], ('globe.gl@2', 'https://npmjs.com/package/globe.gl', '2020-04-10')) self.assertEqual(metadata[3], ('react-resize-detector@4', 'https://npmjs.com/package/react-resize-detector', None)) self.assertEqual(metadata[4],", "tearDownClass(cls): #closing and cleaning up the test database if cls.connection: f = open(\"tests/deprovision_db.sql\",", "os.environ.get('DB_USER') password = <PASSWORD>('DB_PASSWORD') host = os.environ.get('DB_HOST') database = os.environ.get('DB_DATABASE') port = os.environ.get('DB_PORT')", "def test_update_bounded_similarity_scores(self): 
update_bounded_similarity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_similarity FROM similarity ORDER BY package_a, package_b;\") scores =", "packages (id, name, monthly_downloads_last_month, monthly_downloads_a_year_ago, categories, modified, retrieved ) VALUES ({p['id']}, {p['name']}, {p['monthly_downloads_last_month']},", "self.cursor.fetchall() self.assertEqual(metadata[0], ('countup.js@2', 'https://npmjs.com/package/countup.js', '2019-03-14')) self.assertEqual(metadata[1], ('d3@5', 'https://npmjs.com/package/d3', '2020-04-20')) self.assertEqual(metadata[2], ('globe.gl@2', 'https://npmjs.com/package/globe.gl', '2020-04-10'))", "app_data }, { 'id': 8, 'name': '\\'pkg:npm/@reach/router@1\\'', 'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago': 0, 'categories': '\\'{react,\"react", "{p['monthly_downloads_last_month']}, {p['monthly_downloads_a_year_ago']}, {p['categories']}, {p['modified']}, {p['retrieved']}); \"\"\") # Populate with similarity data for p1", "self.cursor.execute(\"SELECT relative_trend FROM packages ORDER BY id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(10,), (2,),", "database = os.environ.get('DB_DATABASE') port = os.environ.get('DB_PORT') connection_string = f\"host={host} user={user} password={password} dbname={database} port={port}\"", "\"r\") cls.connection.cursor().execute(f.read()) cls.connection.commit() f.close() \"\"\" Tests for the ML pipeline model \"\"\" def", "absolute trend self.cursor.execute(\"SELECT absolute_trend FROM packages ORDER by id;\") scores = self.cursor.fetchall() self.assertListEqual(scores,", "= self.database.cursor() # Populate with package data for p in package_data: self.cursor.execute(f\"\"\" INSERT", "= [ { 'id': 1, 'name': '\\'pkg:npm/countup.js@2\\'', 'monthly_downloads_last_month': 400451, 'monthly_downloads_a_year_ago': 0, 'categories': 'null',", "12:42:34.846-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.409069-07\\'', 'app_ids': app_data[-1:] }, { 'id': 2, 
'name': '\\'pkg:npm/d3@5\\'', 'monthly_downloads_last_month':", "0, 'monthly_downloads_a_year_ago': 0, 'categories': '\\'{react,\"react router\"}\\'', 'modified': '\\'2020-02-27 12:14:25.729-08\\'', 'retrieved': '\\'2020-04-25 19:03:37.434285-07\\'', 'app_ids':", "PORT = os.environ['DB_PORT'] connection = None result = None cls.connection = psycopg2.connect(user=USER, password=PASSWORD,", "} ] # Connect to the database user = os.environ.get('DB_USER') password = <PASSWORD>('DB_PASSWORD')", "update_bounded_similarity_scores, \\ update_popularity_scores, update_trending_scores, \\ package_table_postprocessing, write_similarity_scores class TestModel(unittest.TestCase): @classmethod def setUpClass(cls): USER", "(5, ), (5,)]) # Package 8 def test_update_popularity_scores(self): update_popularity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_popularity FROM packages", "self.cursor = self.database.cursor() # Populate with package data for p in package_data: self.cursor.execute(f\"\"\"", "(5,), (5, ), (5, ), # Package 1 (5,), (10,), (10,), (5,), #", "VALUES ({p1['id']}, {p2['id']}, {similarity}); \"\"\") def tearDown(self): # Clean out all data self.cursor.execute(\"DELETE", "(10,), (5,), # Package 4 (5,), (10,), (10,), (5,), # Package 5 (5,", "HOST = os.environ['DB_HOST'] PORT = os.environ['DB_PORT'] connection = None result = None cls.connection", "similarity = len(set(p1['app_ids']) & set(p2['app_ids'])) / len(set(p1['app_ids']) | set(p2['app_ids'])) if similarity == 0:", "'id': 4, 'name': '\\'pkg:npm/globe.gl@2\\'', 'monthly_downloads_last_month': 2221, 'monthly_downloads_a_year_ago': 771, 'categories': '\\'{webgl,three,globe,geo,spherical,projection,orthographic}\\'', 'modified': '\\'2020-04-10 14:13:59.518-07\\'',", "'null', 'modified': '\\'2019-03-14 12:42:34.846-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.409069-07\\'', 'app_ids': app_data[-1:] }, { 'id': 2,", "1, 'name': '\\'pkg:npm/countup.js@2\\'', 'monthly_downloads_last_month': 400451, 
'monthly_downloads_a_year_ago': 0, 'categories': 'null', 'modified': '\\'2019-03-14 12:42:34.846-07\\'', 'retrieved':", "# Insert similarity score into database self.cursor.execute(f\"\"\" INSERT INTO similarity (package_a, package_b, similarity)", "for the ML pipeline model \"\"\" def setUp(self): # Application data app_data =", "self.cursor.fetchall() self.assertListEqual(scores, [(10,), (2,), (2,), (1,), (1,)]) def test_package_table_postprocessing(self): package_table_postprocessing(self.cursor) self.cursor.execute(\"SELECT short_name, url,", "8 def test_update_popularity_scores(self): update_popularity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_popularity FROM packages ORDER BY id;\") scores =", "@classmethod def tearDownClass(cls): #closing and cleaning up the test database if cls.connection: f", "absolute_trend FROM packages ORDER by id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(10,), (5,), (6,),", "'\\'2020-04-10 14:13:59.518-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.426579-07\\'', 'app_ids': app_data }, { 'id': 5, 'name': '\\'pkg:npm/react-resize-detector@4\\'',", "None cls.connection = psycopg2.connect(user=USER, password=PASSWORD, host=HOST, port=PORT, database=DATABASE) f = open(\"tests/provision_db.sql\", \"r\") cls.connection.cursor().execute(f.read())", "[ (5,), (5, ), (5, ), # Package 1 (5,), (10,), (10,), (5,),", "'\\'2020-04-25 19:03:37.429703-07\\'', 'app_ids': app_data }, { 'id': 8, 'name': '\\'pkg:npm/@reach/router@1\\'', 'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago':", "self.assertEqual(metadata[3], ('react-resize-detector@4', 'https://npmjs.com/package/react-resize-detector', None)) self.assertEqual(metadata[4], ('@reach/router@1', 'https://npmjs.com/package/@reach/router', '2020-02-27')) @classmethod def tearDownClass(cls): #closing and", "test database if cls.connection: f = open(\"tests/deprovision_db.sql\", \"r\") cls.connection.cursor().execute(f.read()) cls.connection.commit() 
cls.connection.close() print(\"PostgreSQL connection", "'app_ids': app_data }, { 'id': 4, 'name': '\\'pkg:npm/globe.gl@2\\'', 'monthly_downloads_last_month': 2221, 'monthly_downloads_a_year_ago': 771, 'categories':", "password=PASSWORD, host=HOST, port=PORT, database=DATABASE) f = open(\"tests/provision_db.sql\", \"r\") cls.connection.cursor().execute(f.read()) cls.connection.commit() f.close() \"\"\" Tests", "2966818, 'categories': '\\'{dom,visualization,svg,animation,canvas}\\'', 'modified': '\\'2020-04-20 10:59:10.332-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.421523-07\\'', 'app_ids': app_data }, {", "Insert similarity score into database self.cursor.execute(f\"\"\" INSERT INTO similarity (package_a, package_b, similarity) VALUES", "('react-resize-detector@4', 'https://npmjs.com/package/react-resize-detector', None)) self.assertEqual(metadata[4], ('@reach/router@1', 'https://npmjs.com/package/@reach/router', '2020-02-27')) @classmethod def tearDownClass(cls): #closing and cleaning", "applications; DELETE FROM packages;\") self.database.commit() self.cursor.close() self.database.close() def test_update_bounded_similarity_scores(self): update_bounded_similarity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_similarity FROM", "{p['categories']}, {p['modified']}, {p['retrieved']}); \"\"\") # Populate with similarity data for p1 in package_data:", "id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(8,), (10,), (10,), (10,), (8,)]) def test_update_trending_scores(self): update_trending_scores(self.cursor)", "(5,)]) # Package 8 def test_update_popularity_scores(self): update_popularity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_popularity FROM packages ORDER BY", "0: continue # Insert similarity score into database self.cursor.execute(f\"\"\" INSERT INTO similarity (package_a,", "(5,), (10,), (10,), (5,), # Package 4 (5,), (10,), (10,), (5,), # Package", "os.environ['DB_PORT'] connection = None result = None cls.connection 
= psycopg2.connect(user=USER, password=PASSWORD, host=HOST, port=PORT,", "BY package_a, package_b;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [ (5,), (5, ), (5, ),", "= self.cursor.fetchall() self.assertListEqual(scores, [(10,), (5,), (6,), (1,), (5,)]) # Check relative trend self.cursor.execute(\"SELECT", "= <PASSWORD>('DB_PASSWORD') host = os.environ.get('DB_HOST') database = os.environ.get('DB_DATABASE') port = os.environ.get('DB_PORT') connection_string =", "port = os.environ.get('DB_PORT') connection_string = f\"host={host} user={user} password={password} dbname={database} port={port}\" self.database = psycopg2.connect(connection_string)", "Check relative trend self.cursor.execute(\"SELECT relative_trend FROM packages ORDER BY id;\") scores = self.cursor.fetchall()", "os.environ['DB_USER'] PASSWORD = <PASSWORD>['<PASSWORD>'] DATABASE = os.environ['DB_DATABASE'] REAL_TOKEN = os.environ['GH_TOKEN'] HOST = os.environ['DB_HOST']", "19:03:37.426579-07\\'', 'app_ids': app_data }, { 'id': 5, 'name': '\\'pkg:npm/react-resize-detector@4\\'', 'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago': 1957316,", "class TestModel(unittest.TestCase): @classmethod def setUpClass(cls): USER = os.environ['DB_USER'] PASSWORD = <PASSWORD>['<PASSWORD>'] DATABASE =", "continue # Determine how much overlap the two packages have similarity = len(set(p1['app_ids'])", "19:03:37.409069-07\\'', 'app_ids': app_data[-1:] }, { 'id': 2, 'name': '\\'pkg:npm/d3@5\\'', 'monthly_downloads_last_month': 5306004, 'monthly_downloads_a_year_ago': 2966818,", "'https://npmjs.com/package/globe.gl', '2020-04-10')) self.assertEqual(metadata[3], ('react-resize-detector@4', 'https://npmjs.com/package/react-resize-detector', None)) self.assertEqual(metadata[4], ('@reach/router@1', 'https://npmjs.com/package/@reach/router', '2020-02-27')) @classmethod def tearDownClass(cls):", "much overlap the two packages have similarity = len(set(p1['app_ids']) & set(p2['app_ids'])) / 
len(set(p1['app_ids'])", "DELETE FROM applications; DELETE FROM packages;\") self.database.commit() self.cursor.close() self.database.close() def test_update_bounded_similarity_scores(self): update_bounded_similarity_scores(self.cursor) self.cursor.execute(\"SELECT", "Package 5 (5, ), (5, ), (5,)]) # Package 8 def test_update_popularity_scores(self): update_popularity_scores(self.cursor)", "'retrieved': '\\'2020-04-25 19:03:37.421523-07\\'', 'app_ids': app_data }, { 'id': 4, 'name': '\\'pkg:npm/globe.gl@2\\'', 'monthly_downloads_last_month': 2221,", "self.cursor.execute(\"DELETE FROM dependencies; DELETE FROM similarity; DELETE FROM applications; DELETE FROM packages;\") self.database.commit()", "19:03:37.434285-07\\'', 'app_ids': app_data[:1] } ] # Connect to the database user = os.environ.get('DB_USER')", "similarity data for p1 in package_data: for p2 in package_data: if p1['id'] ==", "= os.environ.get('DB_DATABASE') port = os.environ.get('DB_PORT') connection_string = f\"host={host} user={user} password={password} dbname={database} port={port}\" self.database", "(10,), (10,), (5,), # Package 2 (5,), (10,), (10,), (5,), # Package 4", "Clean out all data self.cursor.execute(\"DELETE FROM dependencies; DELETE FROM similarity; DELETE FROM applications;", "scores = self.cursor.fetchall() self.assertListEqual(scores, [ (5,), (5, ), (5, ), # Package 1", "# Application data app_data = [ 1, 2 ] # Package data package_data", "= self.cursor.fetchall() self.assertListEqual(scores, [ (5,), (5, ), (5, ), # Package 1 (5,),", "FROM packages ORDER BY id;\") metadata = self.cursor.fetchall() self.assertEqual(metadata[0], ('countup.js@2', 'https://npmjs.com/package/countup.js', '2019-03-14')) self.assertEqual(metadata[1],", "router\"}\\'', 'modified': '\\'2020-02-27 12:14:25.729-08\\'', 'retrieved': '\\'2020-04-25 19:03:37.434285-07\\'', 'app_ids': app_data[:1] } ] # Connect", "{ 'id': 8, 'name': '\\'pkg:npm/@reach/router@1\\'', 'monthly_downloads_last_month': 0, 
'monthly_downloads_a_year_ago': 0, 'categories': '\\'{react,\"react router\"}\\'', 'modified':", "DATABASE = os.environ['DB_DATABASE'] REAL_TOKEN = os.environ['GH_TOKEN'] HOST = os.environ['DB_HOST'] PORT = os.environ['DB_PORT'] connection", "'app_ids': app_data[:1] } ] # Connect to the database user = os.environ.get('DB_USER') password", "), (5, ), (5,)]) # Package 8 def test_update_popularity_scores(self): update_popularity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_popularity FROM", "'\\'pkg:npm/d3@5\\'', 'monthly_downloads_last_month': 5306004, 'monthly_downloads_a_year_ago': 2966818, 'categories': '\\'{dom,visualization,svg,animation,canvas}\\'', 'modified': '\\'2020-04-20 10:59:10.332-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.421523-07\\'',", "packages have similarity = len(set(p1['app_ids']) & set(p2['app_ids'])) / len(set(p1['app_ids']) | set(p2['app_ids'])) if similarity", "= f\"host={host} user={user} password={password} dbname={database} port={port}\" self.database = psycopg2.connect(connection_string) self.cursor = self.database.cursor() #", "packages;\") self.database.commit() self.cursor.close() self.database.close() def test_update_bounded_similarity_scores(self): update_bounded_similarity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_similarity FROM similarity ORDER BY", "(6,), (1,), (5,)]) # Check relative trend self.cursor.execute(\"SELECT relative_trend FROM packages ORDER BY", "data for p in package_data: self.cursor.execute(f\"\"\" INSERT INTO packages (id, name, monthly_downloads_last_month, monthly_downloads_a_year_ago,", "ORDER BY id;\") metadata = self.cursor.fetchall() self.assertEqual(metadata[0], ('countup.js@2', 'https://npmjs.com/package/countup.js', '2019-03-14')) self.assertEqual(metadata[1], ('d3@5', 'https://npmjs.com/package/d3',", "Package 1 (5,), (10,), (10,), (5,), # Package 2 (5,), (10,), (10,), (5,),", "@classmethod def setUpClass(cls): USER = os.environ['DB_USER'] PASSWORD = 
<PASSWORD>['<PASSWORD>'] DATABASE = os.environ['DB_DATABASE'] REAL_TOKEN", "2 (5,), (10,), (10,), (5,), # Package 4 (5,), (10,), (10,), (5,), #", "app_data[-1:] }, { 'id': 2, 'name': '\\'pkg:npm/d3@5\\'', 'monthly_downloads_last_month': 5306004, 'monthly_downloads_a_year_ago': 2966818, 'categories': '\\'{dom,visualization,svg,animation,canvas}\\'',", "(5, ), (5, ), # Package 1 (5,), (10,), (10,), (5,), # Package", "{p['retrieved']}); \"\"\") # Populate with similarity data for p1 in package_data: for p2", "data package_data = [ { 'id': 1, 'name': '\\'pkg:npm/countup.js@2\\'', 'monthly_downloads_last_month': 400451, 'monthly_downloads_a_year_ago': 0,", "the database user = os.environ.get('DB_USER') password = <PASSWORD>('DB_PASSWORD') host = os.environ.get('DB_HOST') database =", "modified, retrieved ) VALUES ({p['id']}, {p['name']}, {p['monthly_downloads_last_month']}, {p['monthly_downloads_a_year_ago']}, {p['categories']}, {p['modified']}, {p['retrieved']}); \"\"\") #", "({p1['id']}, {p2['id']}, {similarity}); \"\"\") def tearDown(self): # Clean out all data self.cursor.execute(\"DELETE FROM", "dependencies; DELETE FROM similarity; DELETE FROM applications; DELETE FROM packages;\") self.database.commit() self.cursor.close() self.database.close()", "# Package 1 (5,), (10,), (10,), (5,), # Package 2 (5,), (10,), (10,),", "FROM packages;\") self.database.commit() self.cursor.close() self.database.close() def test_update_bounded_similarity_scores(self): update_bounded_similarity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_similarity FROM similarity ORDER", "Tests for the ML pipeline model \"\"\" def setUp(self): # Application data app_data", "\"\"\" Tests for the ML pipeline model \"\"\" def setUp(self): # Application data", "update_popularity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_popularity FROM packages ORDER BY id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(8,),", "def tearDownClass(cls): #closing and cleaning 
up the test database if cls.connection: f =", "self.cursor.fetchall() self.assertListEqual(scores, [ (5,), (5, ), (5, ), # Package 1 (5,), (10,),", "cls.connection: f = open(\"tests/deprovision_db.sql\", \"r\") cls.connection.cursor().execute(f.read()) cls.connection.commit() cls.connection.close() print(\"PostgreSQL connection is closed succesfully\")", "'\\'2020-04-25 19:03:37.426579-07\\'', 'app_ids': app_data }, { 'id': 5, 'name': '\\'pkg:npm/react-resize-detector@4\\'', 'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago':", "def test_update_popularity_scores(self): update_popularity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_popularity FROM packages ORDER BY id;\") scores = self.cursor.fetchall()", "= os.environ.get('DB_PORT') connection_string = f\"host={host} user={user} password={password} dbname={database} port={port}\" self.database = psycopg2.connect(connection_string) self.cursor", "}, { 'id': 2, 'name': '\\'pkg:npm/d3@5\\'', 'monthly_downloads_last_month': 5306004, 'monthly_downloads_a_year_ago': 2966818, 'categories': '\\'{dom,visualization,svg,animation,canvas}\\'', 'modified':", "'name': '\\'pkg:npm/countup.js@2\\'', 'monthly_downloads_last_month': 400451, 'monthly_downloads_a_year_ago': 0, 'categories': 'null', 'modified': '\\'2019-03-14 12:42:34.846-07\\'', 'retrieved': '\\'2020-04-25", "app_data[:1] } ] # Connect to the database user = os.environ.get('DB_USER') password =", "similarity score into database self.cursor.execute(f\"\"\" INSERT INTO similarity (package_a, package_b, similarity) VALUES ({p1['id']},", "('d3@5', 'https://npmjs.com/package/d3', '2020-04-20')) self.assertEqual(metadata[2], ('globe.gl@2', 'https://npmjs.com/package/globe.gl', '2020-04-10')) self.assertEqual(metadata[3], ('react-resize-detector@4', 'https://npmjs.com/package/react-resize-detector', None)) self.assertEqual(metadata[4], ('@reach/router@1',", "if p1['id'] == p2['id']: continue # Determine how much overlap the two packages", "DELETE 
FROM similarity; DELETE FROM applications; DELETE FROM packages;\") self.database.commit() self.cursor.close() self.database.close() def", "REAL_TOKEN = os.environ['GH_TOKEN'] HOST = os.environ['DB_HOST'] PORT = os.environ['DB_PORT'] connection = None result", "password = <PASSWORD>('DB_PASSWORD') host = os.environ.get('DB_HOST') database = os.environ.get('DB_DATABASE') port = os.environ.get('DB_PORT') connection_string", "= os.environ['DB_HOST'] PORT = os.environ['DB_PORT'] connection = None result = None cls.connection =", "len(set(p1['app_ids']) & set(p2['app_ids'])) / len(set(p1['app_ids']) | set(p2['app_ids'])) if similarity == 0: continue #", "'categories': '\\'{react,resize,detector}\\'', 'modified': 'null', 'retrieved': '\\'2020-04-25 19:03:37.429703-07\\'', 'app_ids': app_data }, { 'id': 8,", "FROM applications; DELETE FROM packages;\") self.database.commit() self.cursor.close() self.database.close() def test_update_bounded_similarity_scores(self): update_bounded_similarity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_similarity", "/ len(set(p1['app_ids']) | set(p2['app_ids'])) if similarity == 0: continue # Insert similarity score", "# Clean out all data self.cursor.execute(\"DELETE FROM dependencies; DELETE FROM similarity; DELETE FROM", "\\ update_popularity_scores, update_trending_scores, \\ package_table_postprocessing, write_similarity_scores class TestModel(unittest.TestCase): @classmethod def setUpClass(cls): USER =", "[ { 'id': 1, 'name': '\\'pkg:npm/countup.js@2\\'', 'monthly_downloads_last_month': 400451, 'monthly_downloads_a_year_ago': 0, 'categories': 'null', 'modified':", "PASSWORD = <PASSWORD>['<PASSWORD>'] DATABASE = os.environ['DB_DATABASE'] REAL_TOKEN = os.environ['GH_TOKEN'] HOST = os.environ['DB_HOST'] PORT", "to the database user = os.environ.get('DB_USER') password = <PASSWORD>('DB_PASSWORD') host = os.environ.get('DB_HOST') database", "packages ORDER BY id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, 
[(8,), (10,), (10,), (10,), (8,)])", "self.cursor.execute(\"SELECT absolute_trend FROM packages ORDER by id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(10,), (5,),", "{p['name']}, {p['monthly_downloads_last_month']}, {p['monthly_downloads_a_year_ago']}, {p['categories']}, {p['modified']}, {p['retrieved']}); \"\"\") # Populate with similarity data for", "short_name, url, display_date FROM packages ORDER BY id;\") metadata = self.cursor.fetchall() self.assertEqual(metadata[0], ('countup.js@2',", "up the test database if cls.connection: f = open(\"tests/deprovision_db.sql\", \"r\") cls.connection.cursor().execute(f.read()) cls.connection.commit() cls.connection.close()", "ORDER BY id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(10,), (2,), (2,), (1,), (1,)]) def", "'name': '\\'pkg:npm/globe.gl@2\\'', 'monthly_downloads_last_month': 2221, 'monthly_downloads_a_year_ago': 771, 'categories': '\\'{webgl,three,globe,geo,spherical,projection,orthographic}\\'', 'modified': '\\'2020-04-10 14:13:59.518-07\\'', 'retrieved': '\\'2020-04-25", "(1,)]) def test_package_table_postprocessing(self): package_table_postprocessing(self.cursor) self.cursor.execute(\"SELECT short_name, url, display_date FROM packages ORDER BY id;\")", "(5, ), (5, ), (5,)]) # Package 8 def test_update_popularity_scores(self): update_popularity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_popularity", "'retrieved': '\\'2020-04-25 19:03:37.409069-07\\'', 'app_ids': app_data[-1:] }, { 'id': 2, 'name': '\\'pkg:npm/d3@5\\'', 'monthly_downloads_last_month': 5306004,", "and cleaning up the test database if cls.connection: f = open(\"tests/deprovision_db.sql\", \"r\") cls.connection.cursor().execute(f.read())", "name, monthly_downloads_last_month, monthly_downloads_a_year_ago, categories, modified, retrieved ) VALUES ({p['id']}, {p['name']}, {p['monthly_downloads_last_month']}, {p['monthly_downloads_a_year_ago']}, {p['categories']},", "f = 
open(\"tests/deprovision_db.sql\", \"r\") cls.connection.cursor().execute(f.read()) cls.connection.commit() cls.connection.close() print(\"PostgreSQL connection is closed succesfully\") f.close()", "'id': 2, 'name': '\\'pkg:npm/d3@5\\'', 'monthly_downloads_last_month': 5306004, 'monthly_downloads_a_year_ago': 2966818, 'categories': '\\'{dom,visualization,svg,animation,canvas}\\'', 'modified': '\\'2020-04-20 10:59:10.332-07\\'',", "categories, modified, retrieved ) VALUES ({p['id']}, {p['name']}, {p['monthly_downloads_last_month']}, {p['monthly_downloads_a_year_ago']}, {p['categories']}, {p['modified']}, {p['retrieved']}); \"\"\")", "}, { 'id': 4, 'name': '\\'pkg:npm/globe.gl@2\\'', 'monthly_downloads_last_month': 2221, 'monthly_downloads_a_year_ago': 771, 'categories': '\\'{webgl,three,globe,geo,spherical,projection,orthographic}\\'', 'modified':", "into database self.cursor.execute(f\"\"\" INSERT INTO similarity (package_a, package_b, similarity) VALUES ({p1['id']}, {p2['id']}, {similarity});", "ORDER BY id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(8,), (10,), (10,), (10,), (8,)]) def", "have similarity = len(set(p1['app_ids']) & set(p2['app_ids'])) / len(set(p1['app_ids']) | set(p2['app_ids'])) if similarity ==", "scores = self.cursor.fetchall() self.assertListEqual(scores, [(10,), (5,), (6,), (1,), (5,)]) # Check relative trend", "os import unittest import psycopg2 from model.database import update_bounded_similarity_scores, \\ update_popularity_scores, update_trending_scores, \\", "= self.cursor.fetchall() self.assertEqual(metadata[0], ('countup.js@2', 'https://npmjs.com/package/countup.js', '2019-03-14')) self.assertEqual(metadata[1], ('d3@5', 'https://npmjs.com/package/d3', '2020-04-20')) self.assertEqual(metadata[2], ('globe.gl@2', 'https://npmjs.com/package/globe.gl',", "from model.database import update_bounded_similarity_scores, \\ update_popularity_scores, update_trending_scores, \\ package_table_postprocessing, 
write_similarity_scores class TestModel(unittest.TestCase): @classmethod", "with similarity data for p1 in package_data: for p2 in package_data: if p1['id']", "similarity ORDER BY package_a, package_b;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [ (5,), (5, ),", "self.cursor.close() self.database.close() def test_update_bounded_similarity_scores(self): update_bounded_similarity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_similarity FROM similarity ORDER BY package_a, package_b;\")", "= None cls.connection = psycopg2.connect(user=USER, password=PASSWORD, host=HOST, port=PORT, database=DATABASE) f = open(\"tests/provision_db.sql\", \"r\")", "INTO packages (id, name, monthly_downloads_last_month, monthly_downloads_a_year_ago, categories, modified, retrieved ) VALUES ({p['id']}, {p['name']},", "model \"\"\" def setUp(self): # Application data app_data = [ 1, 2 ]", "'app_ids': app_data }, { 'id': 5, 'name': '\\'pkg:npm/react-resize-detector@4\\'', 'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago': 1957316, 'categories':", "dbname={database} port={port}\" self.database = psycopg2.connect(connection_string) self.cursor = self.database.cursor() # Populate with package data", "update_popularity_scores, update_trending_scores, \\ package_table_postprocessing, write_similarity_scores class TestModel(unittest.TestCase): @classmethod def setUpClass(cls): USER = os.environ['DB_USER']", "self.cursor.execute(f\"\"\" INSERT INTO similarity (package_a, package_b, similarity) VALUES ({p1['id']}, {p2['id']}, {similarity}); \"\"\") def", "self.assertListEqual(scores, [ (5,), (5, ), (5, ), # Package 1 (5,), (10,), (10,),", "(10,), (10,), (5,), # Package 5 (5, ), (5, ), (5,)]) # Package", "'categories': '\\'{react,\"react router\"}\\'', 'modified': '\\'2020-02-27 12:14:25.729-08\\'', 'retrieved': '\\'2020-04-25 19:03:37.434285-07\\'', 'app_ids': app_data[:1] } ]", "(package_a, package_b, similarity) VALUES ({p1['id']}, {p2['id']}, 
{similarity}); \"\"\") def tearDown(self): # Clean out", "if similarity == 0: continue # Insert similarity score into database self.cursor.execute(f\"\"\" INSERT", "Package 8 def test_update_popularity_scores(self): update_popularity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_popularity FROM packages ORDER BY id;\") scores", "(5,), # Package 2 (5,), (10,), (10,), (5,), # Package 4 (5,), (10,),", "'\\'2020-04-20 10:59:10.332-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.421523-07\\'', 'app_ids': app_data }, { 'id': 4, 'name': '\\'pkg:npm/globe.gl@2\\'',", "Populate with package data for p in package_data: self.cursor.execute(f\"\"\" INSERT INTO packages (id,", "= os.environ.get('DB_HOST') database = os.environ.get('DB_DATABASE') port = os.environ.get('DB_PORT') connection_string = f\"host={host} user={user} password={password}", "'\\'2020-04-25 19:03:37.421523-07\\'', 'app_ids': app_data }, { 'id': 4, 'name': '\\'pkg:npm/globe.gl@2\\'', 'monthly_downloads_last_month': 2221, 'monthly_downloads_a_year_ago':", "test_update_bounded_similarity_scores(self): update_bounded_similarity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_similarity FROM similarity ORDER BY package_a, package_b;\") scores = self.cursor.fetchall()", "), # Package 1 (5,), (10,), (10,), (5,), # Package 2 (5,), (10,),", "(5, ), # Package 1 (5,), (10,), (10,), (5,), # Package 2 (5,),", "update_bounded_similarity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_similarity FROM similarity ORDER BY package_a, package_b;\") scores = self.cursor.fetchall() self.assertListEqual(scores,", "'https://npmjs.com/package/react-resize-detector', None)) self.assertEqual(metadata[4], ('@reach/router@1', 'https://npmjs.com/package/@reach/router', '2020-02-27')) @classmethod def tearDownClass(cls): #closing and cleaning up", "package_data = [ { 'id': 1, 'name': '\\'pkg:npm/countup.js@2\\'', 'monthly_downloads_last_month': 400451, 'monthly_downloads_a_year_ago': 0, 'categories':", "ML 
pipeline model \"\"\" def setUp(self): # Application data app_data = [ 1,", "('globe.gl@2', 'https://npmjs.com/package/globe.gl', '2020-04-10')) self.assertEqual(metadata[3], ('react-resize-detector@4', 'https://npmjs.com/package/react-resize-detector', None)) self.assertEqual(metadata[4], ('@reach/router@1', 'https://npmjs.com/package/@reach/router', '2020-02-27')) @classmethod def", "= os.environ['GH_TOKEN'] HOST = os.environ['DB_HOST'] PORT = os.environ['DB_PORT'] connection = None result =", "8, 'name': '\\'pkg:npm/@reach/router@1\\'', 'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago': 0, 'categories': '\\'{react,\"react router\"}\\'', 'modified': '\\'2020-02-27 12:14:25.729-08\\'',", "'monthly_downloads_a_year_ago': 1957316, 'categories': '\\'{react,resize,detector}\\'', 'modified': 'null', 'retrieved': '\\'2020-04-25 19:03:37.429703-07\\'', 'app_ids': app_data }, {", "package_a, package_b;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [ (5,), (5, ), (5, ), #", "the test database if cls.connection: f = open(\"tests/deprovision_db.sql\", \"r\") cls.connection.cursor().execute(f.read()) cls.connection.commit() cls.connection.close() print(\"PostgreSQL", "= psycopg2.connect(user=USER, password=PASSWORD, host=HOST, port=PORT, database=DATABASE) f = open(\"tests/provision_db.sql\", \"r\") cls.connection.cursor().execute(f.read()) cls.connection.commit() f.close()", "] # Package data package_data = [ { 'id': 1, 'name': '\\'pkg:npm/countup.js@2\\'', 'monthly_downloads_last_month':", "0, 'categories': '\\'{react,\"react router\"}\\'', 'modified': '\\'2020-02-27 12:14:25.729-08\\'', 'retrieved': '\\'2020-04-25 19:03:37.434285-07\\'', 'app_ids': app_data[:1] }", "connection = None result = None cls.connection = psycopg2.connect(user=USER, password=PASSWORD, host=HOST, port=PORT, database=DATABASE)", "\\ package_table_postprocessing, write_similarity_scores class TestModel(unittest.TestCase): @classmethod def setUpClass(cls): USER = 
os.environ['DB_USER'] PASSWORD =", "in package_data: self.cursor.execute(f\"\"\" INSERT INTO packages (id, name, monthly_downloads_last_month, monthly_downloads_a_year_ago, categories, modified, retrieved", "f = open(\"tests/provision_db.sql\", \"r\") cls.connection.cursor().execute(f.read()) cls.connection.commit() f.close() \"\"\" Tests for the ML pipeline", "self.database.close() def test_update_bounded_similarity_scores(self): update_bounded_similarity_scores(self.cursor) self.cursor.execute(\"SELECT bounded_similarity FROM similarity ORDER BY package_a, package_b;\") scores", "2221, 'monthly_downloads_a_year_ago': 771, 'categories': '\\'{webgl,three,globe,geo,spherical,projection,orthographic}\\'', 'modified': '\\'2020-04-10 14:13:59.518-07\\'', 'retrieved': '\\'2020-04-25 19:03:37.426579-07\\'', 'app_ids': app_data", "[(10,), (5,), (6,), (1,), (5,)]) # Check relative trend self.cursor.execute(\"SELECT relative_trend FROM packages", "package_data: if p1['id'] == p2['id']: continue # Determine how much overlap the two", "model.database import update_bounded_similarity_scores, \\ update_popularity_scores, update_trending_scores, \\ package_table_postprocessing, write_similarity_scores class TestModel(unittest.TestCase): @classmethod def", "id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(10,), (5,), (6,), (1,), (5,)]) # Check relative", "cleaning up the test database if cls.connection: f = open(\"tests/deprovision_db.sql\", \"r\") cls.connection.cursor().execute(f.read()) cls.connection.commit()", "bounded_popularity FROM packages ORDER BY id;\") scores = self.cursor.fetchall() self.assertListEqual(scores, [(8,), (10,), (10,),", "similarity == 0: continue # Insert similarity score into database self.cursor.execute(f\"\"\" INSERT INTO", "== 0: continue # Insert similarity score into database self.cursor.execute(f\"\"\" INSERT INTO similarity", "database if cls.connection: f = open(\"tests/deprovision_db.sql\", \"r\") 
cls.connection.cursor().execute(f.read()) cls.connection.commit() cls.connection.close() print(\"PostgreSQL connection is", "5 (5, ), (5, ), (5,)]) # Package 8 def test_update_popularity_scores(self): update_popularity_scores(self.cursor) self.cursor.execute(\"SELECT", "}, { 'id': 8, 'name': '\\'pkg:npm/@reach/router@1\\'', 'monthly_downloads_last_month': 0, 'monthly_downloads_a_year_ago': 0, 'categories': '\\'{react,\"react router\"}\\'',", "\"\"\" import os import unittest import psycopg2 from model.database import update_bounded_similarity_scores, \\ update_popularity_scores,", "1 (5,), (10,), (10,), (5,), # Package 2 (5,), (10,), (10,), (5,), #", "Populate with similarity data for p1 in package_data: for p2 in package_data: if" ]
[ "self.max_val idxs.append((0, col)) val = sign_h * sign_v * val data.append(val) table[col] +=", "TagsHasher. This is because the closeness of the value of each feature is", "meaningless it is basically the result of a hash function. Hence, only identity", "v = json.dumps(v, sort_keys=True) return int(self.hash(str(v).encode('utf-8')).hexdigest(), base=16) @requests def encode(self, docs: DocumentArray, **kwargs):", "idx, doc in enumerate(docs): if doc.tags: idxs, data = [], [] # sparse", "sparse: whether the resulting feature matrix should be a sparse csr_matrix or dense", "considered as text attributes. :param kwargs: \"\"\" super().__init__(**kwargs) self.n_dim = n_dim self.max_val =", "csr_matrix or dense ndarray. Note that this feature requires ``scipy`` :param text_attrs: which", "str): v = v.strip() if v.lower() in {'true', 'yes'}: # parse boolean parameter", "return 0 if isinstance(v, (tuple, dict, list)): v = json.dumps(v, sort_keys=True) return int(self.hash(str(v).encode('utf-8')).hexdigest(),", "the resulting feature matrix should be a sparse csr_matrix or dense ndarray. Note", "large numbers will cause larger overall parameter dimensions. :param sparse: whether the resulting", "each feature is meaningless it is basically the result of a hash function.", "enumerate(docs): if doc.tags: idxs, data = [], [] # sparse table = np.zeros(self.n_dim)", "sort_keys=True) return int(self.hash(str(v).encode('utf-8')).hexdigest(), base=16) @requests def encode(self, docs: DocumentArray, **kwargs): if self.sparse: from", "arbitrary set of tags into a fixed-dimensional matrix using the hashing trick. Unlike", "``scipy`` :param text_attrs: which attributes to be considered as text attributes. 
:param kwargs:", "table[col] += val if self.sparse: doc.embedding = csr_matrix( (data, zip(*idxs)), shape=(1, self.n_dim) )", "self.sparse = sparse def _any_hash(self, v): try: return int(v) # parse int parameter", "Note that this feature requires ``scipy`` :param text_attrs: which attributes to be considered", "h % self.n_dim val = self._any_hash(v) sign_v = np.sign(val) val = val %", "1 if v.lower() in {'false', 'no'}: return 0 if isinstance(v, (tuple, dict, list)):", "distance when searching documents embedded with TagsHasher. This is because the closeness of", "import hashlib import json import numpy as np from jina import Executor, DocumentArray,", "sparse table = np.zeros(self.n_dim) # dense for k, v in doc.tags.items(): h =", "n_dim self.max_val = max_val self.hash = hashlib.md5 self.sparse = sparse def _any_hash(self, v):", "val if self.sparse: doc.embedding = csr_matrix( (data, zip(*idxs)), shape=(1, self.n_dim) ) else: doc.embedding", "set of tags into a fixed-dimensional matrix using the hashing trick. Unlike FeatureHashser,", "class TagsHasher(Executor): \"\"\"Convert an arbitrary set of tags into a fixed-dimensional matrix using", "= max_val self.hash = hashlib.md5 self.sparse = sparse def _any_hash(self, v): try: return", "parse float parameter except ValueError: if not v: # ignore it when the", "= n_dim self.max_val = max_val self.hash = hashlib.md5 self.sparse = sparse def _any_hash(self,", "isinstance(v, str): v = v.strip() if v.lower() in {'true', 'yes'}: # parse boolean", "a hash function. Hence, only identity value matters. 
More info: https://en.wikipedia.org/wiki/Feature_hashing \"\"\" def", "of the value of each feature is meaningless it is basically the result", "except ValueError: if not v: # ignore it when the parameter is empty", "json.dumps(v, sort_keys=True) return int(self.hash(str(v).encode('utf-8')).hexdigest(), base=16) @requests def encode(self, docs: DocumentArray, **kwargs): if self.sparse:", "sign_v * val data.append(val) table[col] += val if self.sparse: doc.embedding = csr_matrix( (data,", "'yes'}: # parse boolean parameter return 1 if v.lower() in {'false', 'no'}: return", "in {'false', 'no'}: return 0 if isinstance(v, (tuple, dict, list)): v = json.dumps(v,", "into a fixed-dimensional matrix using the hashing trick. Unlike FeatureHashser, you should only", "v): try: return int(v) # parse int parameter except ValueError: try: return float(v)", "or dense ndarray. Note that this feature requires ``scipy`` :param text_attrs: which attributes", "Jaccard/Hamming distance when searching documents embedded with TagsHasher. This is because the closeness", "\"\"\" def __init__(self, n_dim: int = 256, max_val: int = 65536, sparse: bool", "json import numpy as np from jina import Executor, DocumentArray, requests class TagsHasher(Executor):", "import numpy as np from jina import Executor, DocumentArray, requests class TagsHasher(Executor): \"\"\"Convert", "is meaningless it is basically the result of a hash function. Hence, only", "encode(self, docs: DocumentArray, **kwargs): if self.sparse: from scipy.sparse import csr_matrix for idx, doc", "return 1 if v.lower() in {'false', 'no'}: return 0 if isinstance(v, (tuple, dict,", "FeatureHashser, you should only use Jaccard/Hamming distance when searching documents embedded with TagsHasher.", "import json import numpy as np from jina import Executor, DocumentArray, requests class", "dimensionality of each document in the output embedding. 
Small numbers of features are", "self.hash = hashlib.md5 self.sparse = sparse def _any_hash(self, v): try: return int(v) #", "in {'true', 'yes'}: # parse boolean parameter return 1 if v.lower() in {'false',", "TagsHasher(Executor): \"\"\"Convert an arbitrary set of tags into a fixed-dimensional matrix using the", "+= val if self.sparse: doc.embedding = csr_matrix( (data, zip(*idxs)), shape=(1, self.n_dim) ) else:", "self.sparse: doc.embedding = csr_matrix( (data, zip(*idxs)), shape=(1, self.n_dim) ) else: doc.embedding = table", "overall parameter dimensions. :param sparse: whether the resulting feature matrix should be a", "Hence, only identity value matters. More info: https://en.wikipedia.org/wiki/Feature_hashing \"\"\" def __init__(self, n_dim: int", "the output embedding. Small numbers of features are likely to cause hash collisions,", "an arbitrary set of tags into a fixed-dimensional matrix using the hashing trick.", "= sparse def _any_hash(self, v): try: return int(v) # parse int parameter except", "def _any_hash(self, v): try: return int(v) # parse int parameter except ValueError: try:", "np.sign(val) val = val % self.max_val idxs.append((0, col)) val = sign_h * sign_v", "isinstance(v, (tuple, dict, list)): v = json.dumps(v, sort_keys=True) return int(self.hash(str(v).encode('utf-8')).hexdigest(), base=16) @requests def", "you should only use Jaccard/Hamming distance when searching documents embedded with TagsHasher. 
This", "data = [], [] # sparse table = np.zeros(self.n_dim) # dense for k,", "to cause hash collisions, but large numbers will cause larger overall parameter dimensions.", ":param sparse: whether the resulting feature matrix should be a sparse csr_matrix or", "{'false', 'no'}: return 0 if isinstance(v, (tuple, dict, list)): v = json.dumps(v, sort_keys=True)", "val data.append(val) table[col] += val if self.sparse: doc.embedding = csr_matrix( (data, zip(*idxs)), shape=(1,", "{'true', 'yes'}: # parse boolean parameter return 1 if v.lower() in {'false', 'no'}:", "idxs, data = [], [] # sparse table = np.zeros(self.n_dim) # dense for", "the value of each feature is meaningless it is basically the result of", "cause hash collisions, but large numbers will cause larger overall parameter dimensions. :param", "scipy.sparse import csr_matrix for idx, doc in enumerate(docs): if doc.tags: idxs, data =", "dense for k, v in doc.tags.items(): h = self._any_hash(k) sign_h = np.sign(h) col", "v = v.strip() if v.lower() in {'true', 'yes'}: # parse boolean parameter return", "Unlike FeatureHashser, you should only use Jaccard/Hamming distance when searching documents embedded with", "sign_v = np.sign(val) val = val % self.max_val idxs.append((0, col)) val = sign_h", "% self.max_val idxs.append((0, col)) val = sign_h * sign_v * val data.append(val) table[col]", "doc in enumerate(docs): if doc.tags: idxs, data = [], [] # sparse table", "= hashlib.md5 self.sparse = sparse def _any_hash(self, v): try: return int(v) # parse", "feature is meaningless it is basically the result of a hash function. Hence,", "k, v in doc.tags.items(): h = self._any_hash(k) sign_h = np.sign(h) col = h", "dimensions. :param sparse: whether the resulting feature matrix should be a sparse csr_matrix", "empty return 0 if isinstance(v, str): v = v.strip() if v.lower() in {'true',", "if doc.tags: idxs, data = [], [] # sparse table = np.zeros(self.n_dim) #", "larger overall parameter dimensions. 
:param sparse: whether the resulting feature matrix should be", "self.n_dim = n_dim self.max_val = max_val self.hash = hashlib.md5 self.sparse = sparse def", "csr_matrix for idx, doc in enumerate(docs): if doc.tags: idxs, data = [], []", "is empty return 0 if isinstance(v, str): v = v.strip() if v.lower() in", "that this feature requires ``scipy`` :param text_attrs: which attributes to be considered as", "hashlib.md5 self.sparse = sparse def _any_hash(self, v): try: return int(v) # parse int", "n_dim: int = 256, max_val: int = 65536, sparse: bool = False, **kwargs):", "only identity value matters. More info: https://en.wikipedia.org/wiki/Feature_hashing \"\"\" def __init__(self, n_dim: int =", "**kwargs): \"\"\" :param n_dim: the dimensionality of each document in the output embedding.", "trick. Unlike FeatureHashser, you should only use Jaccard/Hamming distance when searching documents embedded", "when searching documents embedded with TagsHasher. This is because the closeness of the", "matrix using the hashing trick. Unlike FeatureHashser, you should only use Jaccard/Hamming distance", "parameter except ValueError: try: return float(v) # parse float parameter except ValueError: if", "for k, v in doc.tags.items(): h = self._any_hash(k) sign_h = np.sign(h) col =", "identity value matters. More info: https://en.wikipedia.org/wiki/Feature_hashing \"\"\" def __init__(self, n_dim: int = 256,", "should be a sparse csr_matrix or dense ndarray. Note that this feature requires", "if not v: # ignore it when the parameter is empty return 0", "= sign_h * sign_v * val data.append(val) table[col] += val if self.sparse: doc.embedding", "it is basically the result of a hash function. Hence, only identity value", "numbers will cause larger overall parameter dimensions. :param sparse: whether the resulting feature", "Executor, DocumentArray, requests class TagsHasher(Executor): \"\"\"Convert an arbitrary set of tags into a", "as text attributes. 
:param kwargs: \"\"\" super().__init__(**kwargs) self.n_dim = n_dim self.max_val = max_val", "[] # sparse table = np.zeros(self.n_dim) # dense for k, v in doc.tags.items():", "try: return float(v) # parse float parameter except ValueError: if not v: #", "to be considered as text attributes. :param kwargs: \"\"\" super().__init__(**kwargs) self.n_dim = n_dim", "bool = False, **kwargs): \"\"\" :param n_dim: the dimensionality of each document in", "parse boolean parameter return 1 if v.lower() in {'false', 'no'}: return 0 if", "# sparse table = np.zeros(self.n_dim) # dense for k, v in doc.tags.items(): h", "= [], [] # sparse table = np.zeros(self.n_dim) # dense for k, v", "but large numbers will cause larger overall parameter dimensions. :param sparse: whether the", "max_val: int = 65536, sparse: bool = False, **kwargs): \"\"\" :param n_dim: the", "this feature requires ``scipy`` :param text_attrs: which attributes to be considered as text", "resulting feature matrix should be a sparse csr_matrix or dense ndarray. Note that", "collisions, but large numbers will cause larger overall parameter dimensions. :param sparse: whether", "output embedding. 
Small numbers of features are likely to cause hash collisions, but", "= v.strip() if v.lower() in {'true', 'yes'}: # parse boolean parameter return 1", "boolean parameter return 1 if v.lower() in {'false', 'no'}: return 0 if isinstance(v,", "Small numbers of features are likely to cause hash collisions, but large numbers", "base=16) @requests def encode(self, docs: DocumentArray, **kwargs): if self.sparse: from scipy.sparse import csr_matrix", "table = np.zeros(self.n_dim) # dense for k, v in doc.tags.items(): h = self._any_hash(k)", "= False, **kwargs): \"\"\" :param n_dim: the dimensionality of each document in the", "return 0 if isinstance(v, str): v = v.strip() if v.lower() in {'true', 'yes'}:", "numpy as np from jina import Executor, DocumentArray, requests class TagsHasher(Executor): \"\"\"Convert an", "result of a hash function. Hence, only identity value matters. More info: https://en.wikipedia.org/wiki/Feature_hashing", "the hashing trick. Unlike FeatureHashser, you should only use Jaccard/Hamming distance when searching", "DocumentArray, **kwargs): if self.sparse: from scipy.sparse import csr_matrix for idx, doc in enumerate(docs):", "np.sign(h) col = h % self.n_dim val = self._any_hash(v) sign_v = np.sign(val) val", "the dimensionality of each document in the output embedding. Small numbers of features", "because the closeness of the value of each feature is meaningless it is", "basically the result of a hash function. Hence, only identity value matters. 
More", "idxs.append((0, col)) val = sign_h * sign_v * val data.append(val) table[col] += val", "sign_h = np.sign(h) col = h % self.n_dim val = self._any_hash(v) sign_v =", "np.zeros(self.n_dim) # dense for k, v in doc.tags.items(): h = self._any_hash(k) sign_h =", "(tuple, dict, list)): v = json.dumps(v, sort_keys=True) return int(self.hash(str(v).encode('utf-8')).hexdigest(), base=16) @requests def encode(self,", "for idx, doc in enumerate(docs): if doc.tags: idxs, data = [], [] #", "256, max_val: int = 65536, sparse: bool = False, **kwargs): \"\"\" :param n_dim:", "parameter is empty return 0 if isinstance(v, str): v = v.strip() if v.lower()", "np from jina import Executor, DocumentArray, requests class TagsHasher(Executor): \"\"\"Convert an arbitrary set", "cause larger overall parameter dimensions. :param sparse: whether the resulting feature matrix should", "text_attrs: which attributes to be considered as text attributes. :param kwargs: \"\"\" super().__init__(**kwargs)", "@requests def encode(self, docs: DocumentArray, **kwargs): if self.sparse: from scipy.sparse import csr_matrix for", "float(v) # parse float parameter except ValueError: if not v: # ignore it", "in the output embedding. Small numbers of features are likely to cause hash", "are likely to cause hash collisions, but large numbers will cause larger overall", "# dense for k, v in doc.tags.items(): h = self._any_hash(k) sign_h = np.sign(h)", ":param n_dim: the dimensionality of each document in the output embedding. Small numbers", "be considered as text attributes. :param kwargs: \"\"\" super().__init__(**kwargs) self.n_dim = n_dim self.max_val", "the parameter is empty return 0 if isinstance(v, str): v = v.strip() if", "self.max_val = max_val self.hash = hashlib.md5 self.sparse = sparse def _any_hash(self, v): try:", "More info: https://en.wikipedia.org/wiki/Feature_hashing \"\"\" def __init__(self, n_dim: int = 256, max_val: int =", "using the hashing trick. 
Unlike FeatureHashser, you should only use Jaccard/Hamming distance when", "= 65536, sparse: bool = False, **kwargs): \"\"\" :param n_dim: the dimensionality of", "v: # ignore it when the parameter is empty return 0 if isinstance(v,", "self._any_hash(v) sign_v = np.sign(val) val = val % self.max_val idxs.append((0, col)) val =", "self._any_hash(k) sign_h = np.sign(h) col = h % self.n_dim val = self._any_hash(v) sign_v", "col)) val = sign_h * sign_v * val data.append(val) table[col] += val if", "doc.tags: idxs, data = [], [] # sparse table = np.zeros(self.n_dim) # dense", "= self._any_hash(v) sign_v = np.sign(val) val = val % self.max_val idxs.append((0, col)) val", "matters. More info: https://en.wikipedia.org/wiki/Feature_hashing \"\"\" def __init__(self, n_dim: int = 256, max_val: int", "if self.sparse: doc.embedding = csr_matrix( (data, zip(*idxs)), shape=(1, self.n_dim) ) else: doc.embedding =", "data.append(val) table[col] += val if self.sparse: doc.embedding = csr_matrix( (data, zip(*idxs)), shape=(1, self.n_dim)", "each document in the output embedding. Small numbers of features are likely to", "hashing trick. Unlike FeatureHashser, you should only use Jaccard/Hamming distance when searching documents", "be a sparse csr_matrix or dense ndarray. 
Note that this feature requires ``scipy``", "= np.sign(val) val = val % self.max_val idxs.append((0, col)) val = sign_h *", "def encode(self, docs: DocumentArray, **kwargs): if self.sparse: from scipy.sparse import csr_matrix for idx,", "ignore it when the parameter is empty return 0 if isinstance(v, str): v", "# ignore it when the parameter is empty return 0 if isinstance(v, str):", "sign_h * sign_v * val data.append(val) table[col] += val if self.sparse: doc.embedding =", "return int(self.hash(str(v).encode('utf-8')).hexdigest(), base=16) @requests def encode(self, docs: DocumentArray, **kwargs): if self.sparse: from scipy.sparse", "int parameter except ValueError: try: return float(v) # parse float parameter except ValueError:", "of a hash function. Hence, only identity value matters. More info: https://en.wikipedia.org/wiki/Feature_hashing \"\"\"", "hash function. Hence, only identity value matters. More info: https://en.wikipedia.org/wiki/Feature_hashing \"\"\" def __init__(self,", "will cause larger overall parameter dimensions. :param sparse: whether the resulting feature matrix", "= json.dumps(v, sort_keys=True) return int(self.hash(str(v).encode('utf-8')).hexdigest(), base=16) @requests def encode(self, docs: DocumentArray, **kwargs): if", "which attributes to be considered as text attributes. :param kwargs: \"\"\" super().__init__(**kwargs) self.n_dim", "fixed-dimensional matrix using the hashing trick. Unlike FeatureHashser, you should only use Jaccard/Hamming", "_any_hash(self, v): try: return int(v) # parse int parameter except ValueError: try: return", "False, **kwargs): \"\"\" :param n_dim: the dimensionality of each document in the output", "* val data.append(val) table[col] += val if self.sparse: doc.embedding = csr_matrix( (data, zip(*idxs)),", "max_val self.hash = hashlib.md5 self.sparse = sparse def _any_hash(self, v): try: return int(v)", "use Jaccard/Hamming distance when searching documents embedded with TagsHasher. 
This is because the", "embedded with TagsHasher. This is because the closeness of the value of each", "hashlib import json import numpy as np from jina import Executor, DocumentArray, requests", "only use Jaccard/Hamming distance when searching documents embedded with TagsHasher. This is because", "requests class TagsHasher(Executor): \"\"\"Convert an arbitrary set of tags into a fixed-dimensional matrix", "embedding. Small numbers of features are likely to cause hash collisions, but large", "of tags into a fixed-dimensional matrix using the hashing trick. Unlike FeatureHashser, you", "dense ndarray. Note that this feature requires ``scipy`` :param text_attrs: which attributes to", "65536, sparse: bool = False, **kwargs): \"\"\" :param n_dim: the dimensionality of each", "if v.lower() in {'false', 'no'}: return 0 if isinstance(v, (tuple, dict, list)): v", "'no'}: return 0 if isinstance(v, (tuple, dict, list)): v = json.dumps(v, sort_keys=True) return", "import csr_matrix for idx, doc in enumerate(docs): if doc.tags: idxs, data = [],", "the result of a hash function. Hence, only identity value matters. More info:", "int = 256, max_val: int = 65536, sparse: bool = False, **kwargs): \"\"\"", "function. Hence, only identity value matters. More info: https://en.wikipedia.org/wiki/Feature_hashing \"\"\" def __init__(self, n_dim:", "parameter return 1 if v.lower() in {'false', 'no'}: return 0 if isinstance(v, (tuple,", "is because the closeness of the value of each feature is meaningless it", "value matters. More info: https://en.wikipedia.org/wiki/Feature_hashing \"\"\" def __init__(self, n_dim: int = 256, max_val:", "val = sign_h * sign_v * val data.append(val) table[col] += val if self.sparse:", "v.lower() in {'true', 'yes'}: # parse boolean parameter return 1 if v.lower() in", "matrix should be a sparse csr_matrix or dense ndarray. 
Note that this feature", "docs: DocumentArray, **kwargs): if self.sparse: from scipy.sparse import csr_matrix for idx, doc in", "parameter dimensions. :param sparse: whether the resulting feature matrix should be a sparse", "in doc.tags.items(): h = self._any_hash(k) sign_h = np.sign(h) col = h % self.n_dim", "# parse float parameter except ValueError: if not v: # ignore it when", "feature matrix should be a sparse csr_matrix or dense ndarray. Note that this", "ndarray. Note that this feature requires ``scipy`` :param text_attrs: which attributes to be", "float parameter except ValueError: if not v: # ignore it when the parameter", "sparse: bool = False, **kwargs): \"\"\" :param n_dim: the dimensionality of each document", "try: return int(v) # parse int parameter except ValueError: try: return float(v) #", "ValueError: try: return float(v) # parse float parameter except ValueError: if not v:", "= self._any_hash(k) sign_h = np.sign(h) col = h % self.n_dim val = self._any_hash(v)", ":param kwargs: \"\"\" super().__init__(**kwargs) self.n_dim = n_dim self.max_val = max_val self.hash = hashlib.md5", "of each document in the output embedding. Small numbers of features are likely", "is basically the result of a hash function. Hence, only identity value matters.", "__init__(self, n_dim: int = 256, max_val: int = 65536, sparse: bool = False,", "sparse csr_matrix or dense ndarray. Note that this feature requires ``scipy`` :param text_attrs:", "not v: # ignore it when the parameter is empty return 0 if", "info: https://en.wikipedia.org/wiki/Feature_hashing \"\"\" def __init__(self, n_dim: int = 256, max_val: int = 65536,", "a fixed-dimensional matrix using the hashing trick. Unlike FeatureHashser, you should only use", "n_dim: the dimensionality of each document in the output embedding. 
Small numbers of", "v.lower() in {'false', 'no'}: return 0 if isinstance(v, (tuple, dict, list)): v =", "if v.lower() in {'true', 'yes'}: # parse boolean parameter return 1 if v.lower()", "v in doc.tags.items(): h = self._any_hash(k) sign_h = np.sign(h) col = h %", "val = val % self.max_val idxs.append((0, col)) val = sign_h * sign_v *", "col = h % self.n_dim val = self._any_hash(v) sign_v = np.sign(val) val =", "dict, list)): v = json.dumps(v, sort_keys=True) return int(self.hash(str(v).encode('utf-8')).hexdigest(), base=16) @requests def encode(self, docs:", "This is because the closeness of the value of each feature is meaningless", "* sign_v * val data.append(val) table[col] += val if self.sparse: doc.embedding = csr_matrix(", "if isinstance(v, str): v = v.strip() if v.lower() in {'true', 'yes'}: # parse", "val = self._any_hash(v) sign_v = np.sign(val) val = val % self.max_val idxs.append((0, col))", "of each feature is meaningless it is basically the result of a hash", "0 if isinstance(v, (tuple, dict, list)): v = json.dumps(v, sort_keys=True) return int(self.hash(str(v).encode('utf-8')).hexdigest(), base=16)", "except ValueError: try: return float(v) # parse float parameter except ValueError: if not", "with TagsHasher. This is because the closeness of the value of each feature", "hash collisions, but large numbers will cause larger overall parameter dimensions. :param sparse:", "of features are likely to cause hash collisions, but large numbers will cause", "# parse boolean parameter return 1 if v.lower() in {'false', 'no'}: return 0", "searching documents embedded with TagsHasher. This is because the closeness of the value", "attributes. :param kwargs: \"\"\" super().__init__(**kwargs) self.n_dim = n_dim self.max_val = max_val self.hash =", "likely to cause hash collisions, but large numbers will cause larger overall parameter", ":param text_attrs: which attributes to be considered as text attributes. 
:param kwargs: \"\"\"", "\"\"\"Convert an arbitrary set of tags into a fixed-dimensional matrix using the hashing", "value of each feature is meaningless it is basically the result of a", "= val % self.max_val idxs.append((0, col)) val = sign_h * sign_v * val", "should only use Jaccard/Hamming distance when searching documents embedded with TagsHasher. This is", "= np.zeros(self.n_dim) # dense for k, v in doc.tags.items(): h = self._any_hash(k) sign_h", "whether the resulting feature matrix should be a sparse csr_matrix or dense ndarray.", "DocumentArray, requests class TagsHasher(Executor): \"\"\"Convert an arbitrary set of tags into a fixed-dimensional", "self.sparse: from scipy.sparse import csr_matrix for idx, doc in enumerate(docs): if doc.tags: idxs,", "in enumerate(docs): if doc.tags: idxs, data = [], [] # sparse table =", "document in the output embedding. Small numbers of features are likely to cause", "= np.sign(h) col = h % self.n_dim val = self._any_hash(v) sign_v = np.sign(val)", "import Executor, DocumentArray, requests class TagsHasher(Executor): \"\"\"Convert an arbitrary set of tags into", "int(self.hash(str(v).encode('utf-8')).hexdigest(), base=16) @requests def encode(self, docs: DocumentArray, **kwargs): if self.sparse: from scipy.sparse import", "it when the parameter is empty return 0 if isinstance(v, str): v =", "# parse int parameter except ValueError: try: return float(v) # parse float parameter", "self.n_dim val = self._any_hash(v) sign_v = np.sign(val) val = val % self.max_val idxs.append((0,", "the closeness of the value of each feature is meaningless it is basically", "text attributes. 
:param kwargs: \"\"\" super().__init__(**kwargs) self.n_dim = n_dim self.max_val = max_val self.hash", "h = self._any_hash(k) sign_h = np.sign(h) col = h % self.n_dim val =", "if isinstance(v, (tuple, dict, list)): v = json.dumps(v, sort_keys=True) return int(self.hash(str(v).encode('utf-8')).hexdigest(), base=16) @requests", "int = 65536, sparse: bool = False, **kwargs): \"\"\" :param n_dim: the dimensionality", "\"\"\" :param n_dim: the dimensionality of each document in the output embedding. Small", "ValueError: if not v: # ignore it when the parameter is empty return", "return int(v) # parse int parameter except ValueError: try: return float(v) # parse", "features are likely to cause hash collisions, but large numbers will cause larger", "super().__init__(**kwargs) self.n_dim = n_dim self.max_val = max_val self.hash = hashlib.md5 self.sparse = sparse", "sparse def _any_hash(self, v): try: return int(v) # parse int parameter except ValueError:", "feature requires ``scipy`` :param text_attrs: which attributes to be considered as text attributes.", "**kwargs): if self.sparse: from scipy.sparse import csr_matrix for idx, doc in enumerate(docs): if", "kwargs: \"\"\" super().__init__(**kwargs) self.n_dim = n_dim self.max_val = max_val self.hash = hashlib.md5 self.sparse", "when the parameter is empty return 0 if isinstance(v, str): v = v.strip()", "list)): v = json.dumps(v, sort_keys=True) return int(self.hash(str(v).encode('utf-8')).hexdigest(), base=16) @requests def encode(self, docs: DocumentArray,", "= h % self.n_dim val = self._any_hash(v) sign_v = np.sign(val) val = val", "numbers of features are likely to cause hash collisions, but large numbers will", "a sparse csr_matrix or dense ndarray. 
Note that this feature requires ``scipy`` :param", "int(v) # parse int parameter except ValueError: try: return float(v) # parse float", "return float(v) # parse float parameter except ValueError: if not v: # ignore", "if self.sparse: from scipy.sparse import csr_matrix for idx, doc in enumerate(docs): if doc.tags:", "closeness of the value of each feature is meaningless it is basically the", "\"\"\" super().__init__(**kwargs) self.n_dim = n_dim self.max_val = max_val self.hash = hashlib.md5 self.sparse =", "0 if isinstance(v, str): v = v.strip() if v.lower() in {'true', 'yes'}: #", "tags into a fixed-dimensional matrix using the hashing trick. Unlike FeatureHashser, you should", "% self.n_dim val = self._any_hash(v) sign_v = np.sign(val) val = val % self.max_val", "[], [] # sparse table = np.zeros(self.n_dim) # dense for k, v in", "= 256, max_val: int = 65536, sparse: bool = False, **kwargs): \"\"\" :param", "doc.tags.items(): h = self._any_hash(k) sign_h = np.sign(h) col = h % self.n_dim val", "val % self.max_val idxs.append((0, col)) val = sign_h * sign_v * val data.append(val)", "def __init__(self, n_dim: int = 256, max_val: int = 65536, sparse: bool =", "parameter except ValueError: if not v: # ignore it when the parameter is", "requires ``scipy`` :param text_attrs: which attributes to be considered as text attributes. :param", "from scipy.sparse import csr_matrix for idx, doc in enumerate(docs): if doc.tags: idxs, data", "from jina import Executor, DocumentArray, requests class TagsHasher(Executor): \"\"\"Convert an arbitrary set of", "documents embedded with TagsHasher. 
This is because the closeness of the value of", "as np from jina import Executor, DocumentArray, requests class TagsHasher(Executor): \"\"\"Convert an arbitrary", "https://en.wikipedia.org/wiki/Feature_hashing \"\"\" def __init__(self, n_dim: int = 256, max_val: int = 65536, sparse:", "jina import Executor, DocumentArray, requests class TagsHasher(Executor): \"\"\"Convert an arbitrary set of tags", "parse int parameter except ValueError: try: return float(v) # parse float parameter except", "v.strip() if v.lower() in {'true', 'yes'}: # parse boolean parameter return 1 if", "attributes to be considered as text attributes. :param kwargs: \"\"\" super().__init__(**kwargs) self.n_dim =" ]
[ "distributed in the hope that it will be useful, # but WITHOUT ANY", "their user profile. \"\"\", 'data': [ 'wizard/update_goal.xml', 'wizard/grant_badge.xml', 'views/badge.xml', 'views/challenge.xml', 'views/goal.xml', 'data/cron.xml', 'security/gamification_security.xml',", "# OpenERP, Open Source Management Solution # Copyright (C) 2013 OpenERP SA (<http://openerp.com>).", "# GNU Affero General Public License for more details. # # You should", "a large range of modules and actions. When installed, this module creates easy", "to discover OpenERP and configure their user profile. \"\"\", 'data': [ 'wizard/update_goal.xml', 'wizard/grant_badge.xml',", "as # published by the Free Software Foundation, either version 3 of the", "can be evaluated using goals and numerical objectives to reach. **Goals** are assigned", "OpenERP and configure their user profile. \"\"\", 'data': [ 'wizard/update_goal.xml', 'wizard/grant_badge.xml', 'views/badge.xml', 'views/challenge.xml',", "License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## {", "through **challenges** to evaluate and compare members of a team with each others", "############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2013 OpenERP", "time. For non-numerical achievements, **badges** can be granted to users. From a simple", "members of a team with each others and through time. 
For non-numerical achievements,", "have received a copy of the GNU Affero General Public License # along", "either version 3 of the # License, or (at your option) any later", "Affero General Public License as # published by the Free Software Foundation, either", "'wizard/update_goal.xml', 'wizard/grant_badge.xml', 'views/badge.xml', 'views/challenge.xml', 'views/goal.xml', 'data/cron.xml', 'security/gamification_security.xml', 'security/ir.model.access.csv', 'data/goal_base.xml', 'data/badge.xml', 'views/gamification.xml', ], 'application':", "SA (<http://openerp.com>). # # This program is free software: you can redistribute it", "any later version. # # This program is distributed in the hope that", "new users to discover OpenERP and configure their user profile. \"\"\", 'data': [", "users. From a simple \"thank you\" to an exceptional achievement, a badge is", "published by the Free Software Foundation, either version 3 of the # License,", "installed, this module creates easy goals to help new users to discover OpenERP", "\"thank you\" to an exceptional achievement, a badge is an easy way to", "'security/gamification_security.xml', 'security/ir.model.access.csv', 'data/goal_base.xml', 'data/badge.xml', 'views/gamification.xml', ], 'application': True, 'auto_install': False, 'qweb': ['static/src/xml/gamification.xml'], }", "'OpenERP SA', 'category': 'Human Resources', 'website' : 'https://www.odoo.com/page/gamification', 'depends': ['mail', 'email_template', 'web_kanban_gauge'], 'description':", "users can be evaluated using goals and numerical objectives to reach. 
**Goals** are", "License as # published by the Free Software Foundation, either version 3 of", "<reponame>diogocs1/comps # -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management", "# it under the terms of the GNU Affero General Public License as", "'views/badge.xml', 'views/challenge.xml', 'views/goal.xml', 'data/cron.xml', 'security/gamification_security.xml', 'security/ir.model.access.csv', 'data/goal_base.xml', 'data/badge.xml', 'views/gamification.xml', ], 'application': True, 'auto_install':", "License for more details. # # You should have received a copy of", "evaluate and motivate the users of OpenERP. The users can be evaluated using", "free software: you can redistribute it and/or modify # it under the terms", "# # You should have received a copy of the GNU Affero General", "the GNU Affero General Public License as # published by the Free Software", "you can redistribute it and/or modify # it under the terms of the", "of the GNU Affero General Public License # along with this program. If", "# along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name':", "of a team with each others and through time. For non-numerical achievements, **badges**", "gratitude to a user for their good work. Both goals and badges are", "discover OpenERP and configure their user profile. \"\"\", 'data': [ 'wizard/update_goal.xml', 'wizard/grant_badge.xml', 'views/badge.xml',", "their good work. 
Both goals and badges are flexibles and can be adapted", "'https://www.odoo.com/page/gamification', 'depends': ['mail', 'email_template', 'web_kanban_gauge'], 'description': \"\"\" Gamification process ==================== The Gamification module", "utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C)", "'Human Resources', 'website' : 'https://www.odoo.com/page/gamification', 'depends': ['mail', 'email_template', 'web_kanban_gauge'], 'description': \"\"\" Gamification process", "achievement, a badge is an easy way to exprimate gratitude to a user", "FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License", "# published by the Free Software Foundation, either version 3 of the #", "of the GNU Affero General Public License as # published by the Free", "to evaluate and motivate the users of OpenERP. The users can be evaluated", "the # GNU Affero General Public License for more details. # # You", "exceptional achievement, a badge is an easy way to exprimate gratitude to a", "an easy way to exprimate gratitude to a user for their good work.", "Copyright (C) 2013 OpenERP SA (<http://openerp.com>). # # This program is free software:", "General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. #", "'data/cron.xml', 'security/gamification_security.xml', 'security/ir.model.access.csv', 'data/goal_base.xml', 'data/badge.xml', 'views/gamification.xml', ], 'application': True, 'auto_install': False, 'qweb': ['static/src/xml/gamification.xml'],", "'author': 'OpenERP SA', 'category': 'Human Resources', 'website' : 'https://www.odoo.com/page/gamification', 'depends': ['mail', 'email_template', 'web_kanban_gauge'],", "compare members of a team with each others and through time. 
For non-numerical", "this module creates easy goals to help new users to discover OpenERP and", "a simple \"thank you\" to an exceptional achievement, a badge is an easy", "Gamification process ==================== The Gamification module provides ways to evaluate and motivate the", "badges are flexibles and can be adapted to a large range of modules", "OpenERP. The users can be evaluated using goals and numerical objectives to reach.", "it under the terms of the GNU Affero General Public License as #", "# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution", "this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Gamification', 'version': '1.0',", "is free software: you can redistribute it and/or modify # it under the", "# License, or (at your option) any later version. # # This program", "without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR", "to a user for their good work. Both goals and badges are flexibles", "be adapted to a large range of modules and actions. When installed, this", "numerical objectives to reach. **Goals** are assigned through **challenges** to evaluate and compare", "General Public License as # published by the Free Software Foundation, either version", "of OpenERP. The users can be evaluated using goals and numerical objectives to", "simple \"thank you\" to an exceptional achievement, a badge is an easy way", "From a simple \"thank you\" to an exceptional achievement, a badge is an", "hope that it will be useful, # but WITHOUT ANY WARRANTY; without even", "a copy of the GNU Affero General Public License # along with this", "2013 OpenERP SA (<http://openerp.com>). 
# # This program is free software: you can", "-*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2013", "3 of the # License, or (at your option) any later version. #", "'email_template', 'web_kanban_gauge'], 'description': \"\"\" Gamification process ==================== The Gamification module provides ways to", "When installed, this module creates easy goals to help new users to discover", "actions. When installed, this module creates easy goals to help new users to", "-*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution #", "You should have received a copy of the GNU Affero General Public License", "to help new users to discover OpenERP and configure their user profile. \"\"\",", "'Gamification', 'version': '1.0', 'author': 'OpenERP SA', 'category': 'Human Resources', 'website' : 'https://www.odoo.com/page/gamification', 'depends':", "and compare members of a team with each others and through time. For", "with each others and through time. For non-numerical achievements, **badges** can be granted", "flexibles and can be adapted to a large range of modules and actions.", "and badges are flexibles and can be adapted to a large range of", "module creates easy goals to help new users to discover OpenERP and configure", "of the # License, or (at your option) any later version. # #", "warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #", "Foundation, either version 3 of the # License, or (at your option) any", "process ==================== The Gamification module provides ways to evaluate and motivate the users", "are assigned through **challenges** to evaluate and compare members of a team with", "work. Both goals and badges are flexibles and can be adapted to a", "# Copyright (C) 2013 OpenERP SA (<http://openerp.com>). 
# # This program is free", "a badge is an easy way to exprimate gratitude to a user for", "good work. Both goals and badges are flexibles and can be adapted to", "details. # # You should have received a copy of the GNU Affero", "\"\"\", 'data': [ 'wizard/update_goal.xml', 'wizard/grant_badge.xml', 'views/badge.xml', 'views/challenge.xml', 'views/goal.xml', 'data/cron.xml', 'security/gamification_security.xml', 'security/ir.model.access.csv', 'data/goal_base.xml', 'data/badge.xml',", "(at your option) any later version. # # This program is distributed in", "objectives to reach. **Goals** are assigned through **challenges** to evaluate and compare members", "This program is distributed in the hope that it will be useful, #", "through time. For non-numerical achievements, **badges** can be granted to users. From a", "program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Gamification', 'version': '1.0', 'author':", "program is free software: you can redistribute it and/or modify # it under", "easy way to exprimate gratitude to a user for their good work. Both", "WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A", "and configure their user profile. \"\"\", 'data': [ 'wizard/update_goal.xml', 'wizard/grant_badge.xml', 'views/badge.xml', 'views/challenge.xml', 'views/goal.xml',", "goals and numerical objectives to reach. **Goals** are assigned through **challenges** to evaluate", "**badges** can be granted to users. From a simple \"thank you\" to an", "Solution # Copyright (C) 2013 OpenERP SA (<http://openerp.com>). # # This program is", "version 3 of the # License, or (at your option) any later version.", "useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of #", "the GNU Affero General Public License # along with this program. If not,", "configure their user profile. 
\"\"\", 'data': [ 'wizard/update_goal.xml', 'wizard/grant_badge.xml', 'views/badge.xml', 'views/challenge.xml', 'views/goal.xml', 'data/cron.xml',", "(<http://openerp.com>). # # This program is free software: you can redistribute it and/or", "that it will be useful, # but WITHOUT ANY WARRANTY; without even the", "to a large range of modules and actions. When installed, this module creates", "redistribute it and/or modify # it under the terms of the GNU Affero", "# This program is free software: you can redistribute it and/or modify #", "your option) any later version. # # This program is distributed in the", "to users. From a simple \"thank you\" to an exceptional achievement, a badge", ": 'https://www.odoo.com/page/gamification', 'depends': ['mail', 'email_template', 'web_kanban_gauge'], 'description': \"\"\" Gamification process ==================== The Gamification", "using goals and numerical objectives to reach. **Goals** are assigned through **challenges** to", "of modules and actions. When installed, this module creates easy goals to help", "'depends': ['mail', 'email_template', 'web_kanban_gauge'], 'description': \"\"\" Gamification process ==================== The Gamification module provides", "achievements, **badges** can be granted to users. From a simple \"thank you\" to", "Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ##############################################################################", "users to discover OpenERP and configure their user profile. 
\"\"\", 'data': [ 'wizard/update_goal.xml',", "############################################################################## { 'name': 'Gamification', 'version': '1.0', 'author': 'OpenERP SA', 'category': 'Human Resources', 'website'", "'name': 'Gamification', 'version': '1.0', 'author': 'OpenERP SA', 'category': 'Human Resources', 'website' : 'https://www.odoo.com/page/gamification',", "'1.0', 'author': 'OpenERP SA', 'category': 'Human Resources', 'website' : 'https://www.odoo.com/page/gamification', 'depends': ['mail', 'email_template',", "provides ways to evaluate and motivate the users of OpenERP. The users can", "should have received a copy of the GNU Affero General Public License #", "an exceptional achievement, a badge is an easy way to exprimate gratitude to", "large range of modules and actions. When installed, this module creates easy goals", "by the Free Software Foundation, either version 3 of the # License, or", "OpenERP SA (<http://openerp.com>). # # This program is free software: you can redistribute", "even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.", "'version': '1.0', 'author': 'OpenERP SA', 'category': 'Human Resources', 'website' : 'https://www.odoo.com/page/gamification', 'depends': ['mail',", "each others and through time. For non-numerical achievements, **badges** can be granted to", "are flexibles and can be adapted to a large range of modules and", "{ 'name': 'Gamification', 'version': '1.0', 'author': 'OpenERP SA', 'category': 'Human Resources', 'website' :", "'views/challenge.xml', 'views/goal.xml', 'data/cron.xml', 'security/gamification_security.xml', 'security/ir.model.access.csv', 'data/goal_base.xml', 'data/badge.xml', 'views/gamification.xml', ], 'application': True, 'auto_install': False,", "Management Solution # Copyright (C) 2013 OpenERP SA (<http://openerp.com>). # # This program", "implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the", "be evaluated using goals and numerical objectives to reach. **Goals** are assigned through", "# You should have received a copy of the GNU Affero General Public", "the # License, or (at your option) any later version. # # This", "later version. # # This program is distributed in the hope that it", "ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR", "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero", "help new users to discover OpenERP and configure their user profile. \"\"\", 'data':", "user for their good work. Both goals and badges are flexibles and can", "users of OpenERP. The users can be evaluated using goals and numerical objectives", "See the # GNU Affero General Public License for more details. # #", "the Free Software Foundation, either version 3 of the # License, or (at", "evaluated using goals and numerical objectives to reach. **Goals** are assigned through **challenges**", "be granted to users. From a simple \"thank you\" to an exceptional achievement,", "it will be useful, # but WITHOUT ANY WARRANTY; without even the implied", "# # This program is distributed in the hope that it will be", "Resources', 'website' : 'https://www.odoo.com/page/gamification', 'depends': ['mail', 'email_template', 'web_kanban_gauge'], 'description': \"\"\" Gamification process ====================", "WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS", "the hope that it will be useful, # but WITHOUT ANY WARRANTY; without", "'wizard/grant_badge.xml', 'views/badge.xml', 'views/challenge.xml', 'views/goal.xml', 'data/cron.xml', 'security/gamification_security.xml', 'security/ir.model.access.csv', 'data/goal_base.xml', 'data/badge.xml', 'views/gamification.xml', ], 'application': True,", "under the terms of the GNU Affero General Public License as # published", "terms of the GNU Affero General Public License as # published by the", "and numerical objectives to reach. 
**Goals** are assigned through **challenges** to evaluate and", "coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright", "<http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Gamification', 'version': '1.0', 'author': 'OpenERP SA', 'category': 'Human", "and can be adapted to a large range of modules and actions. When", "adapted to a large range of modules and actions. When installed, this module", "GNU Affero General Public License for more details. # # You should have", "General Public License for more details. # # You should have received a", "with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Gamification', 'version':", "==================== The Gamification module provides ways to evaluate and motivate the users of", "the users of OpenERP. The users can be evaluated using goals and numerical", "**challenges** to evaluate and compare members of a team with each others and", "# # This program is free software: you can redistribute it and/or modify", "be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of", "the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See", "assigned through **challenges** to evaluate and compare members of a team with each", "Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>.", "modules and actions. When installed, this module creates easy goals to help new", "reach. **Goals** are assigned through **challenges** to evaluate and compare members of a", "**Goals** are assigned through **challenges** to evaluate and compare members of a team", "Public License for more details. # # You should have received a copy", "granted to users. 
From a simple \"thank you\" to an exceptional achievement, a", "can be adapted to a large range of modules and actions. When installed,", "# ############################################################################## { 'name': 'Gamification', 'version': '1.0', 'author': 'OpenERP SA', 'category': 'Human Resources',", "badge is an easy way to exprimate gratitude to a user for their", "see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Gamification', 'version': '1.0', 'author': 'OpenERP SA', 'category':", "is distributed in the hope that it will be useful, # but WITHOUT", "Public License as # published by the Free Software Foundation, either version 3", "non-numerical achievements, **badges** can be granted to users. From a simple \"thank you\"", "and/or modify # it under the terms of the GNU Affero General Public", "License, or (at your option) any later version. # # This program is", "exprimate gratitude to a user for their good work. Both goals and badges", "creates easy goals to help new users to discover OpenERP and configure their", "Source Management Solution # Copyright (C) 2013 OpenERP SA (<http://openerp.com>). # # This", "easy goals to help new users to discover OpenERP and configure their user", "PURPOSE. See the # GNU Affero General Public License for more details. #", "in the hope that it will be useful, # but WITHOUT ANY WARRANTY;", "version. # # This program is distributed in the hope that it will", "module provides ways to evaluate and motivate the users of OpenERP. The users", "SA', 'category': 'Human Resources', 'website' : 'https://www.odoo.com/page/gamification', 'depends': ['mail', 'email_template', 'web_kanban_gauge'], 'description': \"\"\"", "can be granted to users. From a simple \"thank you\" to an exceptional", "Gamification module provides ways to evaluate and motivate the users of OpenERP. 
The", "'website' : 'https://www.odoo.com/page/gamification', 'depends': ['mail', 'email_template', 'web_kanban_gauge'], 'description': \"\"\" Gamification process ==================== The", "This program is free software: you can redistribute it and/or modify # it", "'description': \"\"\" Gamification process ==================== The Gamification module provides ways to evaluate and", "Software Foundation, either version 3 of the # License, or (at your option)", "GNU Affero General Public License as # published by the Free Software Foundation,", "received a copy of the GNU Affero General Public License # along with", "along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Gamification',", "Free Software Foundation, either version 3 of the # License, or (at your", "\"\"\" Gamification process ==================== The Gamification module provides ways to evaluate and motivate", "motivate the users of OpenERP. The users can be evaluated using goals and", "to reach. **Goals** are assigned through **challenges** to evaluate and compare members of", "others and through time. For non-numerical achievements, **badges** can be granted to users.", "PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details.", "the terms of the GNU Affero General Public License as # published by", "GNU Affero General Public License # along with this program. If not, see", "The Gamification module provides ways to evaluate and motivate the users of OpenERP.", "# # OpenERP, Open Source Management Solution # Copyright (C) 2013 OpenERP SA", "and through time. For non-numerical achievements, **badges** can be granted to users. From", "FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for", "for more details. 
# # You should have received a copy of the", "goals and badges are flexibles and can be adapted to a large range", "it and/or modify # it under the terms of the GNU Affero General", "or (at your option) any later version. # # This program is distributed", "a user for their good work. Both goals and badges are flexibles and", "A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more", "copy of the GNU Affero General Public License # along with this program.", "profile. \"\"\", 'data': [ 'wizard/update_goal.xml', 'wizard/grant_badge.xml', 'views/badge.xml', 'views/challenge.xml', 'views/goal.xml', 'data/cron.xml', 'security/gamification_security.xml', 'security/ir.model.access.csv', 'data/goal_base.xml',", "['mail', 'email_template', 'web_kanban_gauge'], 'description': \"\"\" Gamification process ==================== The Gamification module provides ways", "'data': [ 'wizard/update_goal.xml', 'wizard/grant_badge.xml', 'views/badge.xml', 'views/challenge.xml', 'views/goal.xml', 'data/cron.xml', 'security/gamification_security.xml', 'security/ir.model.access.csv', 'data/goal_base.xml', 'data/badge.xml', 'views/gamification.xml',", "will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty", "you\" to an exceptional achievement, a badge is an easy way to exprimate", "'web_kanban_gauge'], 'description': \"\"\" Gamification process ==================== The Gamification module provides ways to evaluate", "software: you can redistribute it and/or modify # it under the terms of", "to exprimate gratitude to a user for their good work. Both goals and", "or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public", "and actions. When installed, this module creates easy goals to help new users", "modify # it under the terms of the GNU Affero General Public License", "more details. # # You should have received a copy of the GNU", "Affero General Public License for more details. 
# # You should have received", "goals to help new users to discover OpenERP and configure their user profile.", "ways to evaluate and motivate the users of OpenERP. The users can be", "For non-numerical achievements, **badges** can be granted to users. From a simple \"thank", "to an exceptional achievement, a badge is an easy way to exprimate gratitude", "of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU", "to evaluate and compare members of a team with each others and through", "'views/goal.xml', 'data/cron.xml', 'security/gamification_security.xml', 'security/ir.model.access.csv', 'data/goal_base.xml', 'data/badge.xml', 'views/gamification.xml', ], 'application': True, 'auto_install': False, 'qweb':", "The users can be evaluated using goals and numerical objectives to reach. **Goals**", "a team with each others and through time. For non-numerical achievements, **badges** can", "option) any later version. # # This program is distributed in the hope", "# but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY", "'category': 'Human Resources', 'website' : 'https://www.odoo.com/page/gamification', 'depends': ['mail', 'email_template', 'web_kanban_gauge'], 'description': \"\"\" Gamification", "Open Source Management Solution # Copyright (C) 2013 OpenERP SA (<http://openerp.com>). # #", "not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Gamification', 'version': '1.0', 'author': 'OpenERP SA',", "and motivate the users of OpenERP. The users can be evaluated using goals", "(C) 2013 OpenERP SA (<http://openerp.com>). # # This program is free software: you", "is an easy way to exprimate gratitude to a user for their good", "OpenERP, Open Source Management Solution # Copyright (C) 2013 OpenERP SA (<http://openerp.com>). #", "for their good work. Both goals and badges are flexibles and can be", "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU Affero General", "can redistribute it and/or modify # it under the terms of the GNU", "Both goals and badges are flexibles and can be adapted to a large", "way to exprimate gratitude to a user for their good work. Both goals", "range of modules and actions. When installed, this module creates easy goals to", "[ 'wizard/update_goal.xml', 'wizard/grant_badge.xml', 'views/badge.xml', 'views/challenge.xml', 'views/goal.xml', 'data/cron.xml', 'security/gamification_security.xml', 'security/ir.model.access.csv', 'data/goal_base.xml', 'data/badge.xml', 'views/gamification.xml', ],", "program is distributed in the hope that it will be useful, # but", "# This program is distributed in the hope that it will be useful,", "If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Gamification', 'version': '1.0', 'author': 'OpenERP", "team with each others and through time. For non-numerical achievements, **badges** can be", "user profile. \"\"\", 'data': [ 'wizard/update_goal.xml', 'wizard/grant_badge.xml', 'views/badge.xml', 'views/challenge.xml', 'views/goal.xml', 'data/cron.xml', 'security/gamification_security.xml', 'security/ir.model.access.csv',", "but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or", "evaluate and compare members of a team with each others and through time." ]
[ "batch_size <= len(dataset): img_paths = dataset[img_cnt:img_cnt + batch_size] img_cnt += batch_size img_dims, input_tensor", "time() rknn = RKNN() rknn.load_rknn(path=model_path) rknn.init_runtime() init_time_end = time() timestamps.append((\"init_rknn_start\", init_time_start)) timestamps.append((\"init_rknn_end\", init_time_end))", "rknn.api import RKNN from time import time from utils.img_utils import load_preproc_images from time", "from time import time def inference_rknn(model_path, datatype, input_dims, dataset, batch_size, repeat): timestamps, results", "#print(list(filter(lambda x: x >= 0.01, out[0][0])), len(out), len(out[0]), len(out[0][0])) results.append((out, img_dims)) rknn.release() print('Inferenced", "inf_end_time)) duration = inf_end_time-inf_start_time total_time += duration print('Inference took %f seconds' % (duration))", "% (init_time_end - init_time_start)) # Inference loop total_time, img_cnt = 0.0, 0 while", "init_time_start)) timestamps.append((\"init_rknn_end\", init_time_end)) print('Initialization took %f seconds' % (init_time_end - init_time_start)) # Inference", "+ batch_size] img_cnt += batch_size img_dims, input_tensor = load_preproc_images(img_paths, datatype, input_dims) input_tensor =", "list(), list() init_time_start = time() rknn = RKNN() rknn.load_rknn(path=model_path) rknn.init_runtime() init_time_end = time()", "time import time from utils.img_utils import load_preproc_images from time import time def inference_rknn(model_path,", "load_preproc_images from time import time def inference_rknn(model_path, datatype, input_dims, dataset, batch_size, repeat): timestamps,", "0 while img_cnt + batch_size <= len(dataset): img_paths = dataset[img_cnt:img_cnt + batch_size] img_cnt", "total_time, img_cnt = 0.0, 0 while img_cnt + batch_size <= len(dataset): img_paths =", "import time from utils.img_utils import load_preproc_images from time import time def inference_rknn(model_path, datatype,", "%f seconds' % (duration)) 
#print(list(filter(lambda x: x >= 0.01, out[0][0])), len(out), len(out[0]), len(out[0][0]))", "img_dims, input_tensor = load_preproc_images(img_paths, datatype, input_dims) input_tensor = [x for x in input_tensor]", "= list(), list() init_time_start = time() rknn = RKNN() rknn.load_rknn(path=model_path) rknn.init_runtime() init_time_end =", "img_cnt += batch_size img_dims, input_tensor = load_preproc_images(img_paths, datatype, input_dims) input_tensor = [x for", "took %f seconds' % (duration)) #print(list(filter(lambda x: x >= 0.01, out[0][0])), len(out), len(out[0]),", "rknn.init_runtime() init_time_end = time() timestamps.append((\"init_rknn_start\", init_time_start)) timestamps.append((\"init_rknn_end\", init_time_end)) print('Initialization took %f seconds' %", "dataset[img_cnt:img_cnt + batch_size] img_cnt += batch_size img_dims, input_tensor = load_preproc_images(img_paths, datatype, input_dims) input_tensor", "% (img_cnt), inf_start_time)) timestamps.append((\"inf_end_batch_%d\" % (img_cnt), inf_end_time)) duration = inf_end_time-inf_start_time total_time += duration", "seconds' % (init_time_end - init_time_start)) # Inference loop total_time, img_cnt = 0.0, 0", "time() for i in range(repeat): out = rknn.inference(inputs=input_tensor) inf_end_time = time() timestamps.append((\"inf_start_batch_%d\" %", "x in input_tensor] # Inference and append output to results inf_start_time = time()", "took %f seconds' % (init_time_end - init_time_start)) # Inference loop total_time, img_cnt =", "init_time_start)) # Inference loop total_time, img_cnt = 0.0, 0 while img_cnt + batch_size", "batch_size, repeat): timestamps, results = list(), list() init_time_start = time() rknn = RKNN()", "import load_preproc_images from time import time def inference_rknn(model_path, datatype, input_dims, dataset, batch_size, repeat):", "% (img_cnt), inf_end_time)) duration = inf_end_time-inf_start_time total_time += duration print('Inference took %f seconds'", 
"timestamps.append((\"inf_end_batch_%d\" % (img_cnt), inf_end_time)) duration = inf_end_time-inf_start_time total_time += duration print('Inference took %f", "rknn.release() print('Inferenced %d images in %f seconds' % (img_cnt, total_time)) return results, timestamps", "img_paths = dataset[img_cnt:img_cnt + batch_size] img_cnt += batch_size img_dims, input_tensor = load_preproc_images(img_paths, datatype,", "# Inference loop total_time, img_cnt = 0.0, 0 while img_cnt + batch_size <=", "input_tensor] # Inference and append output to results inf_start_time = time() for i", "from utils.img_utils import load_preproc_images from time import time def inference_rknn(model_path, datatype, input_dims, dataset,", "inf_start_time)) timestamps.append((\"inf_end_batch_%d\" % (img_cnt), inf_end_time)) duration = inf_end_time-inf_start_time total_time += duration print('Inference took", "import RKNN from time import time from utils.img_utils import load_preproc_images from time import", "in range(repeat): out = rknn.inference(inputs=input_tensor) inf_end_time = time() timestamps.append((\"inf_start_batch_%d\" % (img_cnt), inf_start_time)) timestamps.append((\"inf_end_batch_%d\"", "% (duration)) #print(list(filter(lambda x: x >= 0.01, out[0][0])), len(out), len(out[0]), len(out[0][0])) results.append((out, img_dims))", "len(out[0][0])) results.append((out, img_dims)) rknn.release() print('Inferenced %d images in %f seconds' % (img_cnt, total_time))", "utils.img_utils import load_preproc_images from time import time def inference_rknn(model_path, datatype, input_dims, dataset, batch_size,", "i in range(repeat): out = rknn.inference(inputs=input_tensor) inf_end_time = time() timestamps.append((\"inf_start_batch_%d\" % (img_cnt), inf_start_time))", "<reponame>stephanballer/deepedgebench<filename>utils/test/rknn.py<gh_stars>1-10 from rknn.api import RKNN from time import time from utils.img_utils import load_preproc_images", "results.append((out, img_dims)) rknn.release() 
print('Inferenced %d images in %f seconds' % (img_cnt, total_time)) return", "from time import time from utils.img_utils import load_preproc_images from time import time def", "for x in input_tensor] # Inference and append output to results inf_start_time =", "RKNN from time import time from utils.img_utils import load_preproc_images from time import time", "rknn.inference(inputs=input_tensor) inf_end_time = time() timestamps.append((\"inf_start_batch_%d\" % (img_cnt), inf_start_time)) timestamps.append((\"inf_end_batch_%d\" % (img_cnt), inf_end_time)) duration", "total_time += duration print('Inference took %f seconds' % (duration)) #print(list(filter(lambda x: x >=", "= time() rknn = RKNN() rknn.load_rknn(path=model_path) rknn.init_runtime() init_time_end = time() timestamps.append((\"init_rknn_start\", init_time_start)) timestamps.append((\"init_rknn_end\",", "<= len(dataset): img_paths = dataset[img_cnt:img_cnt + batch_size] img_cnt += batch_size img_dims, input_tensor =", "append output to results inf_start_time = time() for i in range(repeat): out =", "input_dims) input_tensor = [x for x in input_tensor] # Inference and append output", "time import time def inference_rknn(model_path, datatype, input_dims, dataset, batch_size, repeat): timestamps, results =", "input_tensor = load_preproc_images(img_paths, datatype, input_dims) input_tensor = [x for x in input_tensor] #", "= 0.0, 0 while img_cnt + batch_size <= len(dataset): img_paths = dataset[img_cnt:img_cnt +", "input_dims, dataset, batch_size, repeat): timestamps, results = list(), list() init_time_start = time() rknn", "batch_size] img_cnt += batch_size img_dims, input_tensor = load_preproc_images(img_paths, datatype, input_dims) input_tensor = [x", "loop total_time, img_cnt = 0.0, 0 while img_cnt + batch_size <= len(dataset): img_paths", "inf_end_time-inf_start_time total_time += duration print('Inference took %f seconds' % (duration)) #print(list(filter(lambda x: x", "import time def 
inference_rknn(model_path, datatype, input_dims, dataset, batch_size, repeat): timestamps, results = list(),", "- init_time_start)) # Inference loop total_time, img_cnt = 0.0, 0 while img_cnt +", "0.0, 0 while img_cnt + batch_size <= len(dataset): img_paths = dataset[img_cnt:img_cnt + batch_size]", "load_preproc_images(img_paths, datatype, input_dims) input_tensor = [x for x in input_tensor] # Inference and", "input_tensor = [x for x in input_tensor] # Inference and append output to", "= load_preproc_images(img_paths, datatype, input_dims) input_tensor = [x for x in input_tensor] # Inference", "= inf_end_time-inf_start_time total_time += duration print('Inference took %f seconds' % (duration)) #print(list(filter(lambda x:", "list() init_time_start = time() rknn = RKNN() rknn.load_rknn(path=model_path) rknn.init_runtime() init_time_end = time() timestamps.append((\"init_rknn_start\",", "timestamps.append((\"inf_start_batch_%d\" % (img_cnt), inf_start_time)) timestamps.append((\"inf_end_batch_%d\" % (img_cnt), inf_end_time)) duration = inf_end_time-inf_start_time total_time +=", "out[0][0])), len(out), len(out[0]), len(out[0][0])) results.append((out, img_dims)) rknn.release() print('Inferenced %d images in %f seconds'", "x: x >= 0.01, out[0][0])), len(out), len(out[0]), len(out[0][0])) results.append((out, img_dims)) rknn.release() print('Inferenced %d", "img_cnt = 0.0, 0 while img_cnt + batch_size <= len(dataset): img_paths = dataset[img_cnt:img_cnt", "duration print('Inference took %f seconds' % (duration)) #print(list(filter(lambda x: x >= 0.01, out[0][0])),", "from rknn.api import RKNN from time import time from utils.img_utils import load_preproc_images from", "(img_cnt), inf_start_time)) timestamps.append((\"inf_end_batch_%d\" % (img_cnt), inf_end_time)) duration = inf_end_time-inf_start_time total_time += duration print('Inference", "datatype, input_dims, dataset, batch_size, repeat): timestamps, results = list(), list() init_time_start = time()", "Inference 
and append output to results inf_start_time = time() for i in range(repeat):", "time() timestamps.append((\"init_rknn_start\", init_time_start)) timestamps.append((\"init_rknn_end\", init_time_end)) print('Initialization took %f seconds' % (init_time_end - init_time_start))", "in input_tensor] # Inference and append output to results inf_start_time = time() for", "time() timestamps.append((\"inf_start_batch_%d\" % (img_cnt), inf_start_time)) timestamps.append((\"inf_end_batch_%d\" % (img_cnt), inf_end_time)) duration = inf_end_time-inf_start_time total_time", "for i in range(repeat): out = rknn.inference(inputs=input_tensor) inf_end_time = time() timestamps.append((\"inf_start_batch_%d\" % (img_cnt),", "rknn = RKNN() rknn.load_rknn(path=model_path) rknn.init_runtime() init_time_end = time() timestamps.append((\"init_rknn_start\", init_time_start)) timestamps.append((\"init_rknn_end\", init_time_end)) print('Initialization", "= rknn.inference(inputs=input_tensor) inf_end_time = time() timestamps.append((\"inf_start_batch_%d\" % (img_cnt), inf_start_time)) timestamps.append((\"inf_end_batch_%d\" % (img_cnt), inf_end_time))", ">= 0.01, out[0][0])), len(out), len(out[0]), len(out[0][0])) results.append((out, img_dims)) rknn.release() print('Inferenced %d images in", "print('Inference took %f seconds' % (duration)) #print(list(filter(lambda x: x >= 0.01, out[0][0])), len(out),", "= time() timestamps.append((\"init_rknn_start\", init_time_start)) timestamps.append((\"init_rknn_end\", init_time_end)) print('Initialization took %f seconds' % (init_time_end -", "time from utils.img_utils import load_preproc_images from time import time def inference_rknn(model_path, datatype, input_dims,", "= [x for x in input_tensor] # Inference and append output to results", "(img_cnt), inf_end_time)) duration = inf_end_time-inf_start_time total_time += duration print('Inference took %f seconds' %", "(init_time_end - init_time_start)) # Inference loop total_time, img_cnt = 0.0, 0 while 
img_cnt", "inf_end_time = time() timestamps.append((\"inf_start_batch_%d\" % (img_cnt), inf_start_time)) timestamps.append((\"inf_end_batch_%d\" % (img_cnt), inf_end_time)) duration =", "inference_rknn(model_path, datatype, input_dims, dataset, batch_size, repeat): timestamps, results = list(), list() init_time_start =", "init_time_end)) print('Initialization took %f seconds' % (init_time_end - init_time_start)) # Inference loop total_time,", "range(repeat): out = rknn.inference(inputs=input_tensor) inf_end_time = time() timestamps.append((\"inf_start_batch_%d\" % (img_cnt), inf_start_time)) timestamps.append((\"inf_end_batch_%d\" %", "and append output to results inf_start_time = time() for i in range(repeat): out", "repeat): timestamps, results = list(), list() init_time_start = time() rknn = RKNN() rknn.load_rknn(path=model_path)", "init_time_end = time() timestamps.append((\"init_rknn_start\", init_time_start)) timestamps.append((\"init_rknn_end\", init_time_end)) print('Initialization took %f seconds' % (init_time_end", "= time() for i in range(repeat): out = rknn.inference(inputs=input_tensor) inf_end_time = time() timestamps.append((\"inf_start_batch_%d\"", "+= duration print('Inference took %f seconds' % (duration)) #print(list(filter(lambda x: x >= 0.01,", "= RKNN() rknn.load_rknn(path=model_path) rknn.init_runtime() init_time_end = time() timestamps.append((\"init_rknn_start\", init_time_start)) timestamps.append((\"init_rknn_end\", init_time_end)) print('Initialization took", "batch_size img_dims, input_tensor = load_preproc_images(img_paths, datatype, input_dims) input_tensor = [x for x in", "timestamps.append((\"init_rknn_end\", init_time_end)) print('Initialization took %f seconds' % (init_time_end - init_time_start)) # Inference loop", "%f seconds' % (init_time_end - init_time_start)) # Inference loop total_time, img_cnt = 0.0,", "0.01, out[0][0])), len(out), len(out[0]), len(out[0][0])) results.append((out, img_dims)) rknn.release() 
print('Inferenced %d images in %f", "seconds' % (duration)) #print(list(filter(lambda x: x >= 0.01, out[0][0])), len(out), len(out[0]), len(out[0][0])) results.append((out,", "= dataset[img_cnt:img_cnt + batch_size] img_cnt += batch_size img_dims, input_tensor = load_preproc_images(img_paths, datatype, input_dims)", "results inf_start_time = time() for i in range(repeat): out = rknn.inference(inputs=input_tensor) inf_end_time =", "while img_cnt + batch_size <= len(dataset): img_paths = dataset[img_cnt:img_cnt + batch_size] img_cnt +=", "+ batch_size <= len(dataset): img_paths = dataset[img_cnt:img_cnt + batch_size] img_cnt += batch_size img_dims,", "dataset, batch_size, repeat): timestamps, results = list(), list() init_time_start = time() rknn =", "to results inf_start_time = time() for i in range(repeat): out = rknn.inference(inputs=input_tensor) inf_end_time", "inf_start_time = time() for i in range(repeat): out = rknn.inference(inputs=input_tensor) inf_end_time = time()", "duration = inf_end_time-inf_start_time total_time += duration print('Inference took %f seconds' % (duration)) #print(list(filter(lambda", "output to results inf_start_time = time() for i in range(repeat): out = rknn.inference(inputs=input_tensor)", "datatype, input_dims) input_tensor = [x for x in input_tensor] # Inference and append", "Inference loop total_time, img_cnt = 0.0, 0 while img_cnt + batch_size <= len(dataset):", "+= batch_size img_dims, input_tensor = load_preproc_images(img_paths, datatype, input_dims) input_tensor = [x for x", "img_cnt + batch_size <= len(dataset): img_paths = dataset[img_cnt:img_cnt + batch_size] img_cnt += batch_size", "out = rknn.inference(inputs=input_tensor) inf_end_time = time() timestamps.append((\"inf_start_batch_%d\" % (img_cnt), inf_start_time)) timestamps.append((\"inf_end_batch_%d\" % (img_cnt),", "print('Initialization took %f seconds' % (init_time_end - init_time_start)) # Inference loop total_time, img_cnt", "[x for x in input_tensor] # Inference 
and append output to results inf_start_time", "results = list(), list() init_time_start = time() rknn = RKNN() rknn.load_rknn(path=model_path) rknn.init_runtime() init_time_end", "def inference_rknn(model_path, datatype, input_dims, dataset, batch_size, repeat): timestamps, results = list(), list() init_time_start", "# Inference and append output to results inf_start_time = time() for i in", "time def inference_rknn(model_path, datatype, input_dims, dataset, batch_size, repeat): timestamps, results = list(), list()", "= time() timestamps.append((\"inf_start_batch_%d\" % (img_cnt), inf_start_time)) timestamps.append((\"inf_end_batch_%d\" % (img_cnt), inf_end_time)) duration = inf_end_time-inf_start_time", "len(out), len(out[0]), len(out[0][0])) results.append((out, img_dims)) rknn.release() print('Inferenced %d images in %f seconds' %", "img_dims)) rknn.release() print('Inferenced %d images in %f seconds' % (img_cnt, total_time)) return results,", "len(out[0]), len(out[0][0])) results.append((out, img_dims)) rknn.release() print('Inferenced %d images in %f seconds' % (img_cnt,", "init_time_start = time() rknn = RKNN() rknn.load_rknn(path=model_path) rknn.init_runtime() init_time_end = time() timestamps.append((\"init_rknn_start\", init_time_start))", "timestamps.append((\"init_rknn_start\", init_time_start)) timestamps.append((\"init_rknn_end\", init_time_end)) print('Initialization took %f seconds' % (init_time_end - init_time_start)) #", "x >= 0.01, out[0][0])), len(out), len(out[0]), len(out[0][0])) results.append((out, img_dims)) rknn.release() print('Inferenced %d images", "rknn.load_rknn(path=model_path) rknn.init_runtime() init_time_end = time() timestamps.append((\"init_rknn_start\", init_time_start)) timestamps.append((\"init_rknn_end\", init_time_end)) print('Initialization took %f seconds'", "timestamps, results = list(), list() init_time_start = time() rknn = RKNN() rknn.load_rknn(path=model_path) rknn.init_runtime()", "RKNN() 
rknn.load_rknn(path=model_path) rknn.init_runtime() init_time_end = time() timestamps.append((\"init_rknn_start\", init_time_start)) timestamps.append((\"init_rknn_end\", init_time_end)) print('Initialization took %f", "len(dataset): img_paths = dataset[img_cnt:img_cnt + batch_size] img_cnt += batch_size img_dims, input_tensor = load_preproc_images(img_paths,", "(duration)) #print(list(filter(lambda x: x >= 0.01, out[0][0])), len(out), len(out[0]), len(out[0][0])) results.append((out, img_dims)) rknn.release()" ]
[ "the sum N = 3 def test(N): SquareSum = 0 SumSquare = 0", "Find the absolute difference between the sum of the squares of the first", "= 3 def test(N): SquareSum = 0 SumSquare = 0 for i in", "+= i*i SumSquare += i SumSquare = SumSquare * SumSquare return SumSquare -", "SquareSum += i*i SumSquare += i SumSquare = SumSquare * SumSquare return SumSquare", "absolute difference between the sum of the squares of the first natural numbers", "i*i SumSquare += i SumSquare = SumSquare * SumSquare return SumSquare - SquareSum", "sum of the squares of the first natural numbers and the square of", "numbers and the square of the sum N = 3 def test(N): SquareSum", "of the squares of the first natural numbers and the square of the", "of the sum N = 3 def test(N): SquareSum = 0 SumSquare =", "in range(1,N+1): SquareSum += i*i SumSquare += i SumSquare = SumSquare * SumSquare", "of the first natural numbers and the square of the sum N =", "= 0 for i in range(1,N+1): SquareSum += i*i SumSquare += i SumSquare", "i in range(1,N+1): SquareSum += i*i SumSquare += i SumSquare = SumSquare *", "SumSquare += i SumSquare = SumSquare * SumSquare return SumSquare - SquareSum print(test(N))", "0 for i in range(1,N+1): SquareSum += i*i SumSquare += i SumSquare =", "difference between the sum of the squares of the first natural numbers and", "N = 3 def test(N): SquareSum = 0 SumSquare = 0 for i", "and the square of the sum N = 3 def test(N): SquareSum =", "test(N): SquareSum = 0 SumSquare = 0 for i in range(1,N+1): SquareSum +=", "natural numbers and the square of the sum N = 3 def test(N):", "the squares of the first natural numbers and the square of the sum", "squares of the first natural numbers and the square of the sum N", "the first natural numbers and the square of the sum N = 3", "SquareSum = 0 SumSquare = 0 for i in range(1,N+1): SquareSum += i*i", "square of the sum N = 3 def test(N): SquareSum = 0 SumSquare", "first natural numbers and the square of the sum N = 3 def", "= 0 SumSquare = 
0 for i in range(1,N+1): SquareSum += i*i SumSquare", "between the sum of the squares of the first natural numbers and the", "the sum of the squares of the first natural numbers and the square", "# Find the absolute difference between the sum of the squares of the", "def test(N): SquareSum = 0 SumSquare = 0 for i in range(1,N+1): SquareSum", "for i in range(1,N+1): SquareSum += i*i SumSquare += i SumSquare = SumSquare", "SumSquare = 0 for i in range(1,N+1): SquareSum += i*i SumSquare += i", "range(1,N+1): SquareSum += i*i SumSquare += i SumSquare = SumSquare * SumSquare return", "the square of the sum N = 3 def test(N): SquareSum = 0", "sum N = 3 def test(N): SquareSum = 0 SumSquare = 0 for", "0 SumSquare = 0 for i in range(1,N+1): SquareSum += i*i SumSquare +=", "the absolute difference between the sum of the squares of the first natural", "3 def test(N): SquareSum = 0 SumSquare = 0 for i in range(1,N+1):" ]
[ "= \"%s%s\" % (entry_prefix, str(entry)) row.append(Paragraph(data, stylesheet)) def post_generate_flowables(self, flowables_buckets): style = TableStyle([", "\"TOP\"), (\"LINEBELOW\", (0,0), (-1,-1), 1, colors.gray), (\"LINEABOVE\", (0,0), (-1,0), 1, colors.gray), ]) tables", "style.spaceAfter = style.fontSize return style class ThreeColumnAcross(PDFTemplate): def generate_flowable_from_entry(self, entry, entry_prefix, stylesheet, bucket):", "flowables_buckets.items(): t = Table(rows) t.setStyle(style) tables[key] = [t] return tables class OneColumn(PDFTemplate): doctemplatefactory", "import colors from reportlab.lib.styles import getSampleStyleSheet from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph", "= 12 styles['Heading1'].fontName = \"Helvetica\" return styles['Heading1'] registry.register_template(ThreeColumnDown, \"threecolumn_down\", \"Three column layout, flowing", "Table(rows) t.setStyle(style) tables[key] = [t] return tables class OneColumn(PDFTemplate): doctemplatefactory = OneColumnDocTemplate def", "layout, flowing down the page (newspaper style)\") registry.register_template(ThreeColumnAcross, \"threecolumn_across\", \"Three column layout, filling", "as OneColumnDocTemplate from pdfbuilder.basetemplates import PDFTemplate from reportlab.lib import colors from reportlab.lib.styles import", "def post_generate_flowables(self, flowables_buckets): style = TableStyle([ (\"VALIGN\", (0,0), (-1,-1), \"TOP\"), (\"LINEBELOW\", (0,0), (-1,-1),", "import SimpleDocTemplate, Table, TableStyle, Paragraph import random class ThreeColumnDown(PDFTemplate): doctemplatefactory = BaseDocTemplate def", "as BaseDocTemplate from pdfbuilder.basetemplates import OneColumnBaseDocTemplateWithHeaderAndFooter as OneColumnDocTemplate from pdfbuilder.basetemplates import PDFTemplate from", "(newspaper style)\") registry.register_template(ThreeColumnAcross, \"threecolumn_across\", \"Three column layout, filling data across in rows with", "from 
pdfbuilder.basetemplates import BaseDocTemplateWithHeaderAndFooter as BaseDocTemplate from pdfbuilder.basetemplates import OneColumnBaseDocTemplateWithHeaderAndFooter as OneColumnDocTemplate from", "the page (newspaper style)\") registry.register_template(ThreeColumnAcross, \"threecolumn_across\", \"Three column layout, filling data across in", "except IndexError: # it's an empty list row = [] bucket.append(row) if len(row)", "list row = [] bucket.append(row) if len(row) == 3: # If the row", "it's an empty list row = [] bucket.append(row) if len(row) == 3: #", "3: # If the row is full (has 3 elements already) we make", "colors.gray), ]) tables = {} for key, rows in flowables_buckets.items(): t = Table(rows)", "(has 3 elements already) we make a new row row = [] bucket.append(row)", "(0,0), (-1,0), 1, colors.gray), ]) tables = {} for key, rows in flowables_buckets.items():", "get_stylesheet(self): styles = getSampleStyleSheet() styles['Heading1'].spaceAfter = 12 styles['Heading1'].fontName = \"Helvetica\" return styles['Heading1'] registry.register_template(ThreeColumnDown,", "(\"LINEABOVE\", (0,0), (-1,0), 1, colors.gray), ]) tables = {} for key, rows in", "bucket[-1] except IndexError: # it's an empty list row = [] bucket.append(row) if", "flowables_buckets): style = TableStyle([ (\"VALIGN\", (0,0), (-1,-1), \"TOP\"), (\"LINEBELOW\", (0,0), (-1,-1), 1, colors.gray),", "OneColumnDocTemplate def get_stylesheet(self): styles = getSampleStyleSheet() styles['Heading1'].spaceAfter = 12 styles['Heading1'].fontName = \"Helvetica\" return", "reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph import random class ThreeColumnDown(PDFTemplate): doctemplatefactory = BaseDocTemplate", "= bucket[-1] except IndexError: # it's an empty list row = [] bucket.append(row)", "style class ThreeColumnAcross(PDFTemplate): def generate_flowable_from_entry(self, entry, entry_prefix, stylesheet, bucket): try: row = bucket[-1]", "IndexError: # it's an empty list row = [] 
bucket.append(row) if len(row) ==", "= Table(rows) t.setStyle(style) tables[key] = [t] return tables class OneColumn(PDFTemplate): doctemplatefactory = OneColumnDocTemplate", "Table, TableStyle, Paragraph import random class ThreeColumnDown(PDFTemplate): doctemplatefactory = BaseDocTemplate def get_stylesheet(self): style", "BaseDocTemplate def get_stylesheet(self): style = getSampleStyleSheet()['Normal'] style.spaceAfter = style.fontSize return style class ThreeColumnAcross(PDFTemplate):", "stylesheet, bucket): try: row = bucket[-1] except IndexError: # it's an empty list", "import PDFTemplate from reportlab.lib import colors from reportlab.lib.styles import getSampleStyleSheet from reportlab.platypus import", "\"threecolumn_down\", \"Three column layout, flowing down the page (newspaper style)\") registry.register_template(ThreeColumnAcross, \"threecolumn_across\", \"Three", "registry.register_template(ThreeColumnAcross, \"threecolumn_across\", \"Three column layout, filling data across in rows with lines separating", "style = getSampleStyleSheet()['Normal'] style.spaceAfter = style.fontSize return style class ThreeColumnAcross(PDFTemplate): def generate_flowable_from_entry(self, entry,", "# If the row is full (has 3 elements already) we make a", "column layout, filling data across in rows with lines separating the rows\") registry.register_template(OneColumn,", "pdfbuilder.basetemplates import PDFTemplate from reportlab.lib import colors from reportlab.lib.styles import getSampleStyleSheet from reportlab.platypus", "column layout, flowing down the page (newspaper style)\") registry.register_template(ThreeColumnAcross, \"threecolumn_across\", \"Three column layout,", "{} for key, rows in flowables_buckets.items(): t = Table(rows) t.setStyle(style) tables[key] = [t]", "class ThreeColumnAcross(PDFTemplate): def generate_flowable_from_entry(self, entry, entry_prefix, stylesheet, bucket): try: row = bucket[-1] except", "from pdfbuilder.basetemplates import 
OneColumnBaseDocTemplateWithHeaderAndFooter as OneColumnDocTemplate from pdfbuilder.basetemplates import PDFTemplate from reportlab.lib import", "row is full (has 3 elements already) we make a new row row", "import random class ThreeColumnDown(PDFTemplate): doctemplatefactory = BaseDocTemplate def get_stylesheet(self): style = getSampleStyleSheet()['Normal'] style.spaceAfter", "OneColumnDocTemplate from pdfbuilder.basetemplates import PDFTemplate from reportlab.lib import colors from reportlab.lib.styles import getSampleStyleSheet", "if len(row) == 3: # If the row is full (has 3 elements", "try: row = bucket[-1] except IndexError: # it's an empty list row =", "filling data across in rows with lines separating the rows\") registry.register_template(OneColumn, \"onecolumn_withcomments\", \"One", "an empty list row = [] bucket.append(row) if len(row) == 3: # If", "getSampleStyleSheet()['Normal'] style.spaceAfter = style.fontSize return style class ThreeColumnAcross(PDFTemplate): def generate_flowable_from_entry(self, entry, entry_prefix, stylesheet,", "== 3: # If the row is full (has 3 elements already) we", "\"threecolumn_across\", \"Three column layout, filling data across in rows with lines separating the", "pdfbuilder import registry from pdfbuilder.basetemplates import BaseDocTemplateWithHeaderAndFooter as BaseDocTemplate from pdfbuilder.basetemplates import OneColumnBaseDocTemplateWithHeaderAndFooter", "doctemplatefactory = BaseDocTemplate def get_stylesheet(self): style = getSampleStyleSheet()['Normal'] style.spaceAfter = style.fontSize return style", "(entry_prefix, str(entry)) row.append(Paragraph(data, stylesheet)) def post_generate_flowables(self, flowables_buckets): style = TableStyle([ (\"VALIGN\", (0,0), (-1,-1),", "tables class OneColumn(PDFTemplate): doctemplatefactory = OneColumnDocTemplate def get_stylesheet(self): styles = getSampleStyleSheet() styles['Heading1'].spaceAfter =", "[t] return tables class OneColumn(PDFTemplate): doctemplatefactory = 
OneColumnDocTemplate def get_stylesheet(self): styles = getSampleStyleSheet()", "= [] bucket.append(row) data = \"%s%s\" % (entry_prefix, str(entry)) row.append(Paragraph(data, stylesheet)) def post_generate_flowables(self,", "styles['Heading1'].fontName = \"Helvetica\" return styles['Heading1'] registry.register_template(ThreeColumnDown, \"threecolumn_down\", \"Three column layout, flowing down the", "stylesheet)) def post_generate_flowables(self, flowables_buckets): style = TableStyle([ (\"VALIGN\", (0,0), (-1,-1), \"TOP\"), (\"LINEBELOW\", (0,0),", "= {} for key, rows in flowables_buckets.items(): t = Table(rows) t.setStyle(style) tables[key] =", "for key, rows in flowables_buckets.items(): t = Table(rows) t.setStyle(style) tables[key] = [t] return", "\"Three column layout, filling data across in rows with lines separating the rows\")", "bucket.append(row) if len(row) == 3: # If the row is full (has 3", "]) tables = {} for key, rows in flowables_buckets.items(): t = Table(rows) t.setStyle(style)", "tables[key] = [t] return tables class OneColumn(PDFTemplate): doctemplatefactory = OneColumnDocTemplate def get_stylesheet(self): styles", "import BaseDocTemplateWithHeaderAndFooter as BaseDocTemplate from pdfbuilder.basetemplates import OneColumnBaseDocTemplateWithHeaderAndFooter as OneColumnDocTemplate from pdfbuilder.basetemplates import", "return tables class OneColumn(PDFTemplate): doctemplatefactory = OneColumnDocTemplate def get_stylesheet(self): styles = getSampleStyleSheet() styles['Heading1'].spaceAfter", "in flowables_buckets.items(): t = Table(rows) t.setStyle(style) tables[key] = [t] return tables class OneColumn(PDFTemplate):", "row row = [] bucket.append(row) data = \"%s%s\" % (entry_prefix, str(entry)) row.append(Paragraph(data, stylesheet))", "(\"VALIGN\", (0,0), (-1,-1), \"TOP\"), (\"LINEBELOW\", (0,0), (-1,-1), 1, colors.gray), (\"LINEABOVE\", (0,0), (-1,0), 1,", "Paragraph import random class ThreeColumnDown(PDFTemplate): doctemplatefactory = 
BaseDocTemplate def get_stylesheet(self): style = getSampleStyleSheet()['Normal']", "= \"Helvetica\" return styles['Heading1'] registry.register_template(ThreeColumnDown, \"threecolumn_down\", \"Three column layout, flowing down the page", "elements already) we make a new row row = [] bucket.append(row) data =", "import getSampleStyleSheet from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph import random class ThreeColumnDown(PDFTemplate):", "OneColumnBaseDocTemplateWithHeaderAndFooter as OneColumnDocTemplate from pdfbuilder.basetemplates import PDFTemplate from reportlab.lib import colors from reportlab.lib.styles", "bucket): try: row = bucket[-1] except IndexError: # it's an empty list row", "row = [] bucket.append(row) if len(row) == 3: # If the row is", "post_generate_flowables(self, flowables_buckets): style = TableStyle([ (\"VALIGN\", (0,0), (-1,-1), \"TOP\"), (\"LINEBELOW\", (0,0), (-1,-1), 1,", "from reportlab.lib.styles import getSampleStyleSheet from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph import random", "(-1,0), 1, colors.gray), ]) tables = {} for key, rows in flowables_buckets.items(): t", "= [] bucket.append(row) if len(row) == 3: # If the row is full", "rows in flowables_buckets.items(): t = Table(rows) t.setStyle(style) tables[key] = [t] return tables class", "PDFTemplate from reportlab.lib import colors from reportlab.lib.styles import getSampleStyleSheet from reportlab.platypus import SimpleDocTemplate,", "from pdfbuilder.basetemplates import PDFTemplate from reportlab.lib import colors from reportlab.lib.styles import getSampleStyleSheet from", "flowing down the page (newspaper style)\") registry.register_template(ThreeColumnAcross, \"threecolumn_across\", \"Three column layout, filling data", "random class ThreeColumnDown(PDFTemplate): doctemplatefactory = BaseDocTemplate def get_stylesheet(self): style = getSampleStyleSheet()['Normal'] style.spaceAfter =", "= 
getSampleStyleSheet()['Normal'] style.spaceAfter = style.fontSize return style class ThreeColumnAcross(PDFTemplate): def generate_flowable_from_entry(self, entry, entry_prefix,", "BaseDocTemplate from pdfbuilder.basetemplates import OneColumnBaseDocTemplateWithHeaderAndFooter as OneColumnDocTemplate from pdfbuilder.basetemplates import PDFTemplate from reportlab.lib", "(-1,-1), \"TOP\"), (\"LINEBELOW\", (0,0), (-1,-1), 1, colors.gray), (\"LINEABOVE\", (0,0), (-1,0), 1, colors.gray), ])", "class OneColumn(PDFTemplate): doctemplatefactory = OneColumnDocTemplate def get_stylesheet(self): styles = getSampleStyleSheet() styles['Heading1'].spaceAfter = 12", "% (entry_prefix, str(entry)) row.append(Paragraph(data, stylesheet)) def post_generate_flowables(self, flowables_buckets): style = TableStyle([ (\"VALIGN\", (0,0),", "already) we make a new row row = [] bucket.append(row) data = \"%s%s\"", "(0,0), (-1,-1), 1, colors.gray), (\"LINEABOVE\", (0,0), (-1,0), 1, colors.gray), ]) tables = {}", "colors.gray), (\"LINEABOVE\", (0,0), (-1,0), 1, colors.gray), ]) tables = {} for key, rows", "styles['Heading1'] registry.register_template(ThreeColumnDown, \"threecolumn_down\", \"Three column layout, flowing down the page (newspaper style)\") registry.register_template(ThreeColumnAcross,", "\"Three column layout, flowing down the page (newspaper style)\") registry.register_template(ThreeColumnAcross, \"threecolumn_across\", \"Three column", "new row row = [] bucket.append(row) data = \"%s%s\" % (entry_prefix, str(entry)) row.append(Paragraph(data,", "pdfbuilder.basetemplates import BaseDocTemplateWithHeaderAndFooter as BaseDocTemplate from pdfbuilder.basetemplates import OneColumnBaseDocTemplateWithHeaderAndFooter as OneColumnDocTemplate from pdfbuilder.basetemplates", "len(row) == 3: # If the row is full (has 3 elements already)", "import registry from pdfbuilder.basetemplates import BaseDocTemplateWithHeaderAndFooter as BaseDocTemplate from pdfbuilder.basetemplates import 
OneColumnBaseDocTemplateWithHeaderAndFooter as", "\"Helvetica\" return styles['Heading1'] registry.register_template(ThreeColumnDown, \"threecolumn_down\", \"Three column layout, flowing down the page (newspaper", "(-1,-1), 1, colors.gray), (\"LINEABOVE\", (0,0), (-1,0), 1, colors.gray), ]) tables = {} for", "full (has 3 elements already) we make a new row row = []", "t.setStyle(style) tables[key] = [t] return tables class OneColumn(PDFTemplate): doctemplatefactory = OneColumnDocTemplate def get_stylesheet(self):", "from reportlab.lib import colors from reportlab.lib.styles import getSampleStyleSheet from reportlab.platypus import SimpleDocTemplate, Table,", "make a new row row = [] bucket.append(row) data = \"%s%s\" % (entry_prefix,", "= TableStyle([ (\"VALIGN\", (0,0), (-1,-1), \"TOP\"), (\"LINEBELOW\", (0,0), (-1,-1), 1, colors.gray), (\"LINEABOVE\", (0,0),", "def generate_flowable_from_entry(self, entry, entry_prefix, stylesheet, bucket): try: row = bucket[-1] except IndexError: #", "= [t] return tables class OneColumn(PDFTemplate): doctemplatefactory = OneColumnDocTemplate def get_stylesheet(self): styles =", "is full (has 3 elements already) we make a new row row =", "the row is full (has 3 elements already) we make a new row", "def get_stylesheet(self): style = getSampleStyleSheet()['Normal'] style.spaceAfter = style.fontSize return style class ThreeColumnAcross(PDFTemplate): def", "tables = {} for key, rows in flowables_buckets.items(): t = Table(rows) t.setStyle(style) tables[key]", "style.fontSize return style class ThreeColumnAcross(PDFTemplate): def generate_flowable_from_entry(self, entry, entry_prefix, stylesheet, bucket): try: row", "registry.register_template(ThreeColumnDown, \"threecolumn_down\", \"Three column layout, flowing down the page (newspaper style)\") registry.register_template(ThreeColumnAcross, \"threecolumn_across\",", "= OneColumnDocTemplate def get_stylesheet(self): styles = getSampleStyleSheet() styles['Heading1'].spaceAfter = 12 
styles['Heading1'].fontName = \"Helvetica\"", "12 styles['Heading1'].fontName = \"Helvetica\" return styles['Heading1'] registry.register_template(ThreeColumnDown, \"threecolumn_down\", \"Three column layout, flowing down", "SimpleDocTemplate, Table, TableStyle, Paragraph import random class ThreeColumnDown(PDFTemplate): doctemplatefactory = BaseDocTemplate def get_stylesheet(self):", "get_stylesheet(self): style = getSampleStyleSheet()['Normal'] style.spaceAfter = style.fontSize return style class ThreeColumnAcross(PDFTemplate): def generate_flowable_from_entry(self,", "from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph import random class ThreeColumnDown(PDFTemplate): doctemplatefactory =", "# it's an empty list row = [] bucket.append(row) if len(row) == 3:", "layout, filling data across in rows with lines separating the rows\") registry.register_template(OneColumn, \"onecolumn_withcomments\",", "= BaseDocTemplate def get_stylesheet(self): style = getSampleStyleSheet()['Normal'] style.spaceAfter = style.fontSize return style class", "styles['Heading1'].spaceAfter = 12 styles['Heading1'].fontName = \"Helvetica\" return styles['Heading1'] registry.register_template(ThreeColumnDown, \"threecolumn_down\", \"Three column layout,", "data across in rows with lines separating the rows\") registry.register_template(OneColumn, \"onecolumn_withcomments\", \"One column", "= style.fontSize return style class ThreeColumnAcross(PDFTemplate): def generate_flowable_from_entry(self, entry, entry_prefix, stylesheet, bucket): try:", "page (newspaper style)\") registry.register_template(ThreeColumnAcross, \"threecolumn_across\", \"Three column layout, filling data across in rows", "\"%s%s\" % (entry_prefix, str(entry)) row.append(Paragraph(data, stylesheet)) def post_generate_flowables(self, flowables_buckets): style = TableStyle([ (\"VALIGN\",", "= getSampleStyleSheet() styles['Heading1'].spaceAfter = 12 styles['Heading1'].fontName = \"Helvetica\" return 
styles['Heading1'] registry.register_template(ThreeColumnDown, \"threecolumn_down\", \"Three", "getSampleStyleSheet() styles['Heading1'].spaceAfter = 12 styles['Heading1'].fontName = \"Helvetica\" return styles['Heading1'] registry.register_template(ThreeColumnDown, \"threecolumn_down\", \"Three column", "(\"LINEBELOW\", (0,0), (-1,-1), 1, colors.gray), (\"LINEABOVE\", (0,0), (-1,0), 1, colors.gray), ]) tables =", "row = bucket[-1] except IndexError: # it's an empty list row = []", "generate_flowable_from_entry(self, entry, entry_prefix, stylesheet, bucket): try: row = bucket[-1] except IndexError: # it's", "styles = getSampleStyleSheet() styles['Heading1'].spaceAfter = 12 styles['Heading1'].fontName = \"Helvetica\" return styles['Heading1'] registry.register_template(ThreeColumnDown, \"threecolumn_down\",", "across in rows with lines separating the rows\") registry.register_template(OneColumn, \"onecolumn_withcomments\", \"One column layout\")", "empty list row = [] bucket.append(row) if len(row) == 3: # If the", "t = Table(rows) t.setStyle(style) tables[key] = [t] return tables class OneColumn(PDFTemplate): doctemplatefactory =", "bucket.append(row) data = \"%s%s\" % (entry_prefix, str(entry)) row.append(Paragraph(data, stylesheet)) def post_generate_flowables(self, flowables_buckets): style", "from pdfbuilder import registry from pdfbuilder.basetemplates import BaseDocTemplateWithHeaderAndFooter as BaseDocTemplate from pdfbuilder.basetemplates import", "we make a new row row = [] bucket.append(row) data = \"%s%s\" %", "BaseDocTemplateWithHeaderAndFooter as BaseDocTemplate from pdfbuilder.basetemplates import OneColumnBaseDocTemplateWithHeaderAndFooter as OneColumnDocTemplate from pdfbuilder.basetemplates import PDFTemplate", "data = \"%s%s\" % (entry_prefix, str(entry)) row.append(Paragraph(data, stylesheet)) def post_generate_flowables(self, flowables_buckets): style =", "registry from pdfbuilder.basetemplates import BaseDocTemplateWithHeaderAndFooter as 
BaseDocTemplate from pdfbuilder.basetemplates import OneColumnBaseDocTemplateWithHeaderAndFooter as OneColumnDocTemplate", "def get_stylesheet(self): styles = getSampleStyleSheet() styles['Heading1'].spaceAfter = 12 styles['Heading1'].fontName = \"Helvetica\" return styles['Heading1']", "import OneColumnBaseDocTemplateWithHeaderAndFooter as OneColumnDocTemplate from pdfbuilder.basetemplates import PDFTemplate from reportlab.lib import colors from", "[] bucket.append(row) data = \"%s%s\" % (entry_prefix, str(entry)) row.append(Paragraph(data, stylesheet)) def post_generate_flowables(self, flowables_buckets):", "(0,0), (-1,-1), \"TOP\"), (\"LINEBELOW\", (0,0), (-1,-1), 1, colors.gray), (\"LINEABOVE\", (0,0), (-1,0), 1, colors.gray),", "a new row row = [] bucket.append(row) data = \"%s%s\" % (entry_prefix, str(entry))", "pdfbuilder.basetemplates import OneColumnBaseDocTemplateWithHeaderAndFooter as OneColumnDocTemplate from pdfbuilder.basetemplates import PDFTemplate from reportlab.lib import colors", "doctemplatefactory = OneColumnDocTemplate def get_stylesheet(self): styles = getSampleStyleSheet() styles['Heading1'].spaceAfter = 12 styles['Heading1'].fontName =", "getSampleStyleSheet from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph import random class ThreeColumnDown(PDFTemplate): doctemplatefactory", "return style class ThreeColumnAcross(PDFTemplate): def generate_flowable_from_entry(self, entry, entry_prefix, stylesheet, bucket): try: row =", "row = [] bucket.append(row) data = \"%s%s\" % (entry_prefix, str(entry)) row.append(Paragraph(data, stylesheet)) def", "TableStyle, Paragraph import random class ThreeColumnDown(PDFTemplate): doctemplatefactory = BaseDocTemplate def get_stylesheet(self): style =", "1, colors.gray), (\"LINEABOVE\", (0,0), (-1,0), 1, colors.gray), ]) tables = {} for key,", "3 elements already) we make a new row row = [] bucket.append(row) data", "key, rows in flowables_buckets.items(): t = Table(rows) 
t.setStyle(style) tables[key] = [t] return tables", "ThreeColumnAcross(PDFTemplate): def generate_flowable_from_entry(self, entry, entry_prefix, stylesheet, bucket): try: row = bucket[-1] except IndexError:", "If the row is full (has 3 elements already) we make a new", "colors from reportlab.lib.styles import getSampleStyleSheet from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph import", "str(entry)) row.append(Paragraph(data, stylesheet)) def post_generate_flowables(self, flowables_buckets): style = TableStyle([ (\"VALIGN\", (0,0), (-1,-1), \"TOP\"),", "row.append(Paragraph(data, stylesheet)) def post_generate_flowables(self, flowables_buckets): style = TableStyle([ (\"VALIGN\", (0,0), (-1,-1), \"TOP\"), (\"LINEBELOW\",", "class ThreeColumnDown(PDFTemplate): doctemplatefactory = BaseDocTemplate def get_stylesheet(self): style = getSampleStyleSheet()['Normal'] style.spaceAfter = style.fontSize", "style = TableStyle([ (\"VALIGN\", (0,0), (-1,-1), \"TOP\"), (\"LINEBELOW\", (0,0), (-1,-1), 1, colors.gray), (\"LINEABOVE\",", "style)\") registry.register_template(ThreeColumnAcross, \"threecolumn_across\", \"Three column layout, filling data across in rows with lines", "down the page (newspaper style)\") registry.register_template(ThreeColumnAcross, \"threecolumn_across\", \"Three column layout, filling data across", "return styles['Heading1'] registry.register_template(ThreeColumnDown, \"threecolumn_down\", \"Three column layout, flowing down the page (newspaper style)\")", "ThreeColumnDown(PDFTemplate): doctemplatefactory = BaseDocTemplate def get_stylesheet(self): style = getSampleStyleSheet()['Normal'] style.spaceAfter = style.fontSize return", "reportlab.lib import colors from reportlab.lib.styles import getSampleStyleSheet from reportlab.platypus import SimpleDocTemplate, Table, TableStyle,", "entry, entry_prefix, stylesheet, bucket): try: row = bucket[-1] except IndexError: # it's an", "[] bucket.append(row) if len(row) == 3: # If the 
row is full (has", "OneColumn(PDFTemplate): doctemplatefactory = OneColumnDocTemplate def get_stylesheet(self): styles = getSampleStyleSheet() styles['Heading1'].spaceAfter = 12 styles['Heading1'].fontName", "TableStyle([ (\"VALIGN\", (0,0), (-1,-1), \"TOP\"), (\"LINEBELOW\", (0,0), (-1,-1), 1, colors.gray), (\"LINEABOVE\", (0,0), (-1,0),", "1, colors.gray), ]) tables = {} for key, rows in flowables_buckets.items(): t =", "reportlab.lib.styles import getSampleStyleSheet from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph import random class", "entry_prefix, stylesheet, bucket): try: row = bucket[-1] except IndexError: # it's an empty" ]
[ "return result return perf_clocked @perf_clock def time_func(seconds): time.sleep(seconds) @perf_clock def sum_func(*numbers): return sum(numbers)", "작성 # 2. 로깅, 프레임워크, 유효성 체크..... -> 공통 기능 # 3. 조합해서", "# 데코레이터 # 장점 # 1. 중복 제거, 코드 간결, 공통 함수 작성", "# 데코레이터 사용 print('*' * 40, 'Called Decorator -> time_func') print() time_func(1.5) print('*'", "실행 함수명 name = func.__name__ # 함수 매개변수 arg_str = ', '.join(repr(arg) for", "유효성 체크..... -> 공통 기능 # 3. 조합해서 사용 용이 # 단점 #", "et = time.perf_counter() - st # 실행 함수명 name = func.__name__ # 함수", "장점 # 1. 중복 제거, 코드 간결, 공통 함수 작성 # 2. 로깅,", "none_deco1 = perf_clock(time_func) none_deco2 = perf_clock(sum_func) print(none_deco1, none_deco1.__code__.co_freevars) print(none_deco2, none_deco2.__code__.co_freevars) print('-' * 40,", "= time.perf_counter() result = func(*args) # 함수 종료 시간 계산 et = time.perf_counter()", "perf_clock(time_func) none_deco2 = perf_clock(sum_func) print(none_deco1, none_deco1.__code__.co_freevars) print(none_deco2, none_deco2.__code__.co_freevars) print('-' * 40, 'Called None", "arg_str = ', '.join(repr(arg) for arg in args) # 결과 출력 print('[%0.5fs] %s(%s)", "none_deco1(1.5) print('-' * 40, 'Called None Decorator -> sum_func') print() none_deco2(100, 150, 250,", "함수 작성 # 2. 로깅, 프레임워크, 유효성 체크..... 
-> 공통 기능 # 3.", "함수 종료 시간 계산 et = time.perf_counter() - st # 실행 함수명 name", "None Decorator -> time_func') print() none_deco1(1.5) print('-' * 40, 'Called None Decorator ->", "'Called None Decorator -> sum_func') print() none_deco2(100, 150, 250, 300, 350) print() print()", "* 40, 'Called Decorator -> time_func') print() time_func(1.5) print('*' * 40, 'Called Decorator", "def time_func(seconds): time.sleep(seconds) @perf_clock def sum_func(*numbers): return sum(numbers) # 데코레이터 미사용 none_deco1 =", "none_deco2.__code__.co_freevars) print('-' * 40, 'Called None Decorator -> time_func') print() none_deco1(1.5) print('-' *", "결과 출력 print('[%0.5fs] %s(%s) -> %r' % (et, name, arg_str, result)) return result", "for arg in args) # 결과 출력 print('[%0.5fs] %s(%s) -> %r' % (et,", "', '.join(repr(arg) for arg in args) # 결과 출력 print('[%0.5fs] %s(%s) -> %r'", "%s(%s) -> %r' % (et, name, arg_str, result)) return result return perf_clocked @perf_clock", "불편 # 데코레이터 실습 import time def perf_clock(func): def perf_clocked(*args): # 함수 시작", "체크..... -> 공통 기능 # 3. 조합해서 사용 용이 # 단점 # 1.", "in args) # 결과 출력 print('[%0.5fs] %s(%s) -> %r' % (et, name, arg_str,", "데코레이터 # 장점 # 1. 중복 제거, 코드 간결, 공통 함수 작성 #", "간결, 공통 함수 작성 # 2. 로깅, 프레임워크, 유효성 체크..... -> 공통 기능", "함수 시작 시간 st = time.perf_counter() result = func(*args) # 함수 종료 시간", "none_deco2 = perf_clock(sum_func) print(none_deco1, none_deco1.__code__.co_freevars) print(none_deco2, none_deco2.__code__.co_freevars) print('-' * 40, 'Called None Decorator", "time.perf_counter() - st # 실행 함수명 name = func.__name__ # 함수 매개변수 arg_str", "perf_clocked(*args): # 함수 시작 시간 st = time.perf_counter() result = func(*args) # 함수", "사용 용이 # 단점 # 1. 가독성 감소? # 2. 
특정 기능에 한정된", "'.join(repr(arg) for arg in args) # 결과 출력 print('[%0.5fs] %s(%s) -> %r' %", "name, arg_str, result)) return result return perf_clocked @perf_clock def time_func(seconds): time.sleep(seconds) @perf_clock def", "print('-' * 40, 'Called None Decorator -> sum_func') print() none_deco2(100, 150, 250, 300,", "import time def perf_clock(func): def perf_clocked(*args): # 함수 시작 시간 st = time.perf_counter()", "sum_func(*numbers): return sum(numbers) # 데코레이터 미사용 none_deco1 = perf_clock(time_func) none_deco2 = perf_clock(sum_func) print(none_deco1,", "# 데코레이터 실습 import time def perf_clock(func): def perf_clocked(*args): # 함수 시작 시간", "print(none_deco2, none_deco2.__code__.co_freevars) print('-' * 40, 'Called None Decorator -> time_func') print() none_deco1(1.5) print('-'", "perf_clock(func): def perf_clocked(*args): # 함수 시작 시간 st = time.perf_counter() result = func(*args)", "-> time_func') print() none_deco1(1.5) print('-' * 40, 'Called None Decorator -> sum_func') print()", "None Decorator -> sum_func') print() none_deco2(100, 150, 250, 300, 350) print() print() #", "250, 300, 350) print() print() # 데코레이터 사용 print('*' * 40, 'Called Decorator", "args) # 결과 출력 print('[%0.5fs] %s(%s) -> %r' % (et, name, arg_str, result))", "= time.perf_counter() - st # 실행 함수명 name = func.__name__ # 함수 매개변수", "(et, name, arg_str, result)) return result return perf_clocked @perf_clock def time_func(seconds): time.sleep(seconds) @perf_clock", "# 2. 로깅, 프레임워크, 유효성 체크..... -> 공통 기능 # 3. 조합해서 사용", "단점 # 1. 가독성 감소? # 2. 특정 기능에 한정된 함수는 -> 단일", "= func(*args) # 함수 종료 시간 계산 et = time.perf_counter() - st #", "가독성 감소? # 2. 특정 기능에 한정된 함수는 -> 단일 함수로 작성하는 것이", "심화 # 데코레이터 # 장점 # 1. 
중복 제거, 코드 간결, 공통 함수", "300, 350) print() print() # 데코레이터 사용 print('*' * 40, 'Called Decorator ->", "Decorator -> time_func') print() time_func(1.5) print('*' * 40, 'Called Decorator -> sum_func') print()", "st = time.perf_counter() result = func(*args) # 함수 종료 시간 계산 et =", "미사용 none_deco1 = perf_clock(time_func) none_deco2 = perf_clock(sum_func) print(none_deco1, none_deco1.__code__.co_freevars) print(none_deco2, none_deco2.__code__.co_freevars) print('-' *", "# 데코레이터 미사용 none_deco1 = perf_clock(time_func) none_deco2 = perf_clock(sum_func) print(none_deco1, none_deco1.__code__.co_freevars) print(none_deco2, none_deco2.__code__.co_freevars)", "* 40, 'Called None Decorator -> time_func') print() none_deco1(1.5) print('-' * 40, 'Called", "print() time_func(1.5) print('*' * 40, 'Called Decorator -> sum_func') print() sum_func(100, 150, 250,", "데코레이터 사용 print('*' * 40, 'Called Decorator -> time_func') print() time_func(1.5) print('*' *", "sum(numbers) # 데코레이터 미사용 none_deco1 = perf_clock(time_func) none_deco2 = perf_clock(sum_func) print(none_deco1, none_deco1.__code__.co_freevars) print(none_deco2,", "@perf_clock def time_func(seconds): time.sleep(seconds) @perf_clock def sum_func(*numbers): return sum(numbers) # 데코레이터 미사용 none_deco1", "350) print() print() # 데코레이터 사용 print('*' * 40, 'Called Decorator -> time_func')", "용이 # 단점 # 1. 가독성 감소? # 2. 특정 기능에 한정된 함수는", "함수는 -> 단일 함수로 작성하는 것이 유리 # 3. 디버깅 불편 # 데코레이터", "조합해서 사용 용이 # 단점 # 1. 가독성 감소? # 2. 특정 기능에", "%r' % (et, name, arg_str, result)) return result return perf_clocked @perf_clock def time_func(seconds):", "# 1. 중복 제거, 코드 간결, 공통 함수 작성 # 2. 로깅, 프레임워크,", "= ', '.join(repr(arg) for arg in args) # 결과 출력 print('[%0.5fs] %s(%s) ->", "# 파이썬 심화 # 데코레이터 # 장점 # 1. 중복 제거, 코드 간결,", "# 3. 디버깅 불편 # 데코레이터 실습 import time def perf_clock(func): def perf_clocked(*args):", "파이썬 심화 # 데코레이터 # 장점 # 1. 중복 제거, 코드 간결, 공통", "# 장점 # 1. 
중복 제거, 코드 간결, 공통 함수 작성 # 2.", "Decorator -> sum_func') print() none_deco2(100, 150, 250, 300, 350) print() print() # 데코레이터", "perf_clock(sum_func) print(none_deco1, none_deco1.__code__.co_freevars) print(none_deco2, none_deco2.__code__.co_freevars) print('-' * 40, 'Called None Decorator -> time_func')", "1. 중복 제거, 코드 간결, 공통 함수 작성 # 2. 로깅, 프레임워크, 유효성", "디버깅 불편 # 데코레이터 실습 import time def perf_clock(func): def perf_clocked(*args): # 함수", "@perf_clock def sum_func(*numbers): return sum(numbers) # 데코레이터 미사용 none_deco1 = perf_clock(time_func) none_deco2 =", "150, 250, 300, 350) print() print() # 데코레이터 사용 print('*' * 40, 'Called", "'Called Decorator -> time_func') print() time_func(1.5) print('*' * 40, 'Called Decorator -> sum_func')", "Chapter05-04 # 파이썬 심화 # 데코레이터 # 장점 # 1. 중복 제거, 코드", "1. 가독성 감소? # 2. 특정 기능에 한정된 함수는 -> 단일 함수로 작성하는", "arg in args) # 결과 출력 print('[%0.5fs] %s(%s) -> %r' % (et, name,", "% (et, name, arg_str, result)) return result return perf_clocked @perf_clock def time_func(seconds): time.sleep(seconds)", "# 함수 종료 시간 계산 et = time.perf_counter() - st # 실행 함수명", "-> 단일 함수로 작성하는 것이 유리 # 3. 디버깅 불편 # 데코레이터 실습", "func.__name__ # 함수 매개변수 arg_str = ', '.join(repr(arg) for arg in args) #", "return perf_clocked @perf_clock def time_func(seconds): time.sleep(seconds) @perf_clock def sum_func(*numbers): return sum(numbers) # 데코레이터", "* 40, 'Called None Decorator -> sum_func') print() none_deco2(100, 150, 250, 300, 350)", "# 함수 시작 시간 st = time.perf_counter() result = func(*args) # 함수 종료", "time_func') print() none_deco1(1.5) print('-' * 40, 'Called None Decorator -> sum_func') print() none_deco2(100,", "-> time_func') print() time_func(1.5) print('*' * 40, 'Called Decorator -> sum_func') print() sum_func(100,", "감소? # 2. 특정 기능에 한정된 함수는 -> 단일 함수로 작성하는 것이 유리", "3. 
디버깅 불편 # 데코레이터 실습 import time def perf_clock(func): def perf_clocked(*args): #", "result = func(*args) # 함수 종료 시간 계산 et = time.perf_counter() - st", "종료 시간 계산 et = time.perf_counter() - st # 실행 함수명 name =", "매개변수 arg_str = ', '.join(repr(arg) for arg in args) # 결과 출력 print('[%0.5fs]", "time_func(seconds): time.sleep(seconds) @perf_clock def sum_func(*numbers): return sum(numbers) # 데코레이터 미사용 none_deco1 = perf_clock(time_func)", "사용 print('*' * 40, 'Called Decorator -> time_func') print() time_func(1.5) print('*' * 40,", "# 단점 # 1. 가독성 감소? # 2. 특정 기능에 한정된 함수는 ->", "함수 매개변수 arg_str = ', '.join(repr(arg) for arg in args) # 결과 출력", "return sum(numbers) # 데코레이터 미사용 none_deco1 = perf_clock(time_func) none_deco2 = perf_clock(sum_func) print(none_deco1, none_deco1.__code__.co_freevars)", "time.perf_counter() result = func(*args) # 함수 종료 시간 계산 et = time.perf_counter() -", "유리 # 3. 디버깅 불편 # 데코레이터 실습 import time def perf_clock(func): def", "none_deco1.__code__.co_freevars) print(none_deco2, none_deco2.__code__.co_freevars) print('-' * 40, 'Called None Decorator -> time_func') print() none_deco1(1.5)", "name = func.__name__ # 함수 매개변수 arg_str = ', '.join(repr(arg) for arg in", "로깅, 프레임워크, 유효성 체크..... -> 공통 기능 # 3. 조합해서 사용 용이 #", "3. 조합해서 사용 용이 # 단점 # 1. 가독성 감소? # 2. 특정", "print() none_deco1(1.5) print('-' * 40, 'Called None Decorator -> sum_func') print() none_deco2(100, 150,", "특정 기능에 한정된 함수는 -> 단일 함수로 작성하는 것이 유리 # 3. 디버깅", "단일 함수로 작성하는 것이 유리 # 3. 디버깅 불편 # 데코레이터 실습 import", "func(*args) # 함수 종료 시간 계산 et = time.perf_counter() - st # 실행", "출력 print('[%0.5fs] %s(%s) -> %r' % (et, name, arg_str, result)) return result return", "'Called None Decorator -> time_func') print() none_deco1(1.5) print('-' * 40, 'Called None Decorator", "제거, 코드 간결, 공통 함수 작성 # 2. 로깅, 프레임워크, 유효성 체크..... ->", "공통 기능 # 3. 조합해서 사용 용이 # 단점 # 1. 
가독성 감소?", "데코레이터 미사용 none_deco1 = perf_clock(time_func) none_deco2 = perf_clock(sum_func) print(none_deco1, none_deco1.__code__.co_freevars) print(none_deco2, none_deco2.__code__.co_freevars) print('-'", "기능에 한정된 함수는 -> 단일 함수로 작성하는 것이 유리 # 3. 디버깅 불편", "-> sum_func') print() none_deco2(100, 150, 250, 300, 350) print() print() # 데코레이터 사용", "2. 특정 기능에 한정된 함수는 -> 단일 함수로 작성하는 것이 유리 # 3.", "= perf_clock(time_func) none_deco2 = perf_clock(sum_func) print(none_deco1, none_deco1.__code__.co_freevars) print(none_deco2, none_deco2.__code__.co_freevars) print('-' * 40, 'Called", "작성하는 것이 유리 # 3. 디버깅 불편 # 데코레이터 실습 import time def", "2. 로깅, 프레임워크, 유효성 체크..... -> 공통 기능 # 3. 조합해서 사용 용이", "def perf_clocked(*args): # 함수 시작 시간 st = time.perf_counter() result = func(*args) #", "time def perf_clock(func): def perf_clocked(*args): # 함수 시작 시간 st = time.perf_counter() result", "시간 st = time.perf_counter() result = func(*args) # 함수 종료 시간 계산 et", "print('*' * 40, 'Called Decorator -> sum_func') print() sum_func(100, 150, 250, 300, 350)", "print() print() # 데코레이터 사용 print('*' * 40, 'Called Decorator -> time_func') print()", "= func.__name__ # 함수 매개변수 arg_str = ', '.join(repr(arg) for arg in args)", "시작 시간 st = time.perf_counter() result = func(*args) # 함수 종료 시간 계산", "시간 계산 et = time.perf_counter() - st # 실행 함수명 name = func.__name__", "# 실행 함수명 name = func.__name__ # 함수 매개변수 arg_str = ', '.join(repr(arg)", "-> 공통 기능 # 3. 조합해서 사용 용이 # 단점 # 1. 가독성", "공통 함수 작성 # 2. 로깅, 프레임워크, 유효성 체크..... 
-> 공통 기능 #", "= perf_clock(sum_func) print(none_deco1, none_deco1.__code__.co_freevars) print(none_deco2, none_deco2.__code__.co_freevars) print('-' * 40, 'Called None Decorator ->", "print() none_deco2(100, 150, 250, 300, 350) print() print() # 데코레이터 사용 print('*' *", "perf_clocked @perf_clock def time_func(seconds): time.sleep(seconds) @perf_clock def sum_func(*numbers): return sum(numbers) # 데코레이터 미사용", "- st # 실행 함수명 name = func.__name__ # 함수 매개변수 arg_str =", "print('[%0.5fs] %s(%s) -> %r' % (et, name, arg_str, result)) return result return perf_clocked", "time.sleep(seconds) @perf_clock def sum_func(*numbers): return sum(numbers) # 데코레이터 미사용 none_deco1 = perf_clock(time_func) none_deco2", "40, 'Called None Decorator -> sum_func') print() none_deco2(100, 150, 250, 300, 350) print()", "* 40, 'Called Decorator -> sum_func') print() sum_func(100, 150, 250, 300, 350) print()", "print() # 데코레이터 사용 print('*' * 40, 'Called Decorator -> time_func') print() time_func(1.5)", "Decorator -> time_func') print() none_deco1(1.5) print('-' * 40, 'Called None Decorator -> sum_func')", "result)) return result return perf_clocked @perf_clock def time_func(seconds): time.sleep(seconds) @perf_clock def sum_func(*numbers): return", "print('-' * 40, 'Called None Decorator -> time_func') print() none_deco1(1.5) print('-' * 40,", "print('*' * 40, 'Called Decorator -> time_func') print() time_func(1.5) print('*' * 40, 'Called", "40, 'Called Decorator -> time_func') print() time_func(1.5) print('*' * 40, 'Called Decorator ->", "# 2. 특정 기능에 한정된 함수는 -> 단일 함수로 작성하는 것이 유리 #", "코드 간결, 공통 함수 작성 # 2. 로깅, 프레임워크, 유효성 체크..... -> 공통", "함수로 작성하는 것이 유리 # 3. 디버깅 불편 # 데코레이터 실습 import time", "중복 제거, 코드 간결, 공통 함수 작성 # 2. 
로깅, 프레임워크, 유효성 체크.....", "데코레이터 실습 import time def perf_clock(func): def perf_clocked(*args): # 함수 시작 시간 st", "print(none_deco1, none_deco1.__code__.co_freevars) print(none_deco2, none_deco2.__code__.co_freevars) print('-' * 40, 'Called None Decorator -> time_func') print()", "# Chapter05-04 # 파이썬 심화 # 데코레이터 # 장점 # 1. 중복 제거,", "def sum_func(*numbers): return sum(numbers) # 데코레이터 미사용 none_deco1 = perf_clock(time_func) none_deco2 = perf_clock(sum_func)", "arg_str, result)) return result return perf_clocked @perf_clock def time_func(seconds): time.sleep(seconds) @perf_clock def sum_func(*numbers):", "기능 # 3. 조합해서 사용 용이 # 단점 # 1. 가독성 감소? #", "것이 유리 # 3. 디버깅 불편 # 데코레이터 실습 import time def perf_clock(func):", "st # 실행 함수명 name = func.__name__ # 함수 매개변수 arg_str = ',", "# 함수 매개변수 arg_str = ', '.join(repr(arg) for arg in args) # 결과", "실습 import time def perf_clock(func): def perf_clocked(*args): # 함수 시작 시간 st =", "40, 'Called None Decorator -> time_func') print() none_deco1(1.5) print('-' * 40, 'Called None", "# 3. 조합해서 사용 용이 # 단점 # 1. 가독성 감소? # 2.", "# 1. 가독성 감소? # 2. 특정 기능에 한정된 함수는 -> 단일 함수로", "한정된 함수는 -> 단일 함수로 작성하는 것이 유리 # 3. 디버깅 불편 #", "result return perf_clocked @perf_clock def time_func(seconds): time.sleep(seconds) @perf_clock def sum_func(*numbers): return sum(numbers) #", "sum_func') print() none_deco2(100, 150, 250, 300, 350) print() print() # 데코레이터 사용 print('*'", "계산 et = time.perf_counter() - st # 실행 함수명 name = func.__name__ #", "함수명 name = func.__name__ # 함수 매개변수 arg_str = ', '.join(repr(arg) for arg", "none_deco2(100, 150, 250, 300, 350) print() print() # 데코레이터 사용 print('*' * 40,", "time_func(1.5) print('*' * 40, 'Called Decorator -> sum_func') print() sum_func(100, 150, 250, 300,", "time_func') print() time_func(1.5) print('*' * 40, 'Called Decorator -> sum_func') print() sum_func(100, 150,", "프레임워크, 유효성 체크..... -> 공통 기능 # 3. 
조합해서 사용 용이 # 단점", "# 결과 출력 print('[%0.5fs] %s(%s) -> %r' % (et, name, arg_str, result)) return", "def perf_clock(func): def perf_clocked(*args): # 함수 시작 시간 st = time.perf_counter() result =", "-> %r' % (et, name, arg_str, result)) return result return perf_clocked @perf_clock def" ]
[ "return images @staticmethod def get_last_gradient(experiment, seed, checkpoint, episode): \"\"\"Get the last gradient for", "states for given model for all episodes\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0]", "= 100 max_length = 0 train = {} validation = {} for seed", "are the location of this max value in the gradient image. \"\"\" path", "model\"\"\" log_file = DataReader.get_evaluation_log_file(experiment, seed, step) with open(log_file, 'r') as f: last_line =", "in this experiment that succeeded in this episode. \"\"\" seeds = DataReader.find_option_values('seed', experiment)", "seeds: result[seed] = [] checkpoints = DataReader.find_option_values( 'checkpoint', experiment, seed) if len(steps) <", "result[seed].append(success) min_length = min(min_length, len(result[seed])) max_length = max(max_length, len(result[seed])) if len(result) > 0:", "checkpoints for checkpoint in checkpoints: success = DataReader.get_success_rate( experiment, seed, checkpoint) result[seed].append(success) min_length", "the success rate for a given model\"\"\" log_file = DataReader.get_evaluation_log_file(experiment, seed, step) with", "+ '*.log') regexp = r\"seed=(\\d+)-\" elif option == 'checkpoint': path = DataReader.get_experiments_mapping()[experiment] logs", "= [] validation_losses = [] for line in lines: match = regex.match(line) if", "length of 1, and is equal to {paths}' return paths[0] @staticmethod @lru_cache(maxsize=100) def", "seed. experiment and seed have to be passed. if option == 'episode' -", "+ '*.model.log') regexp = r'-novaluestep(\\d+)\\.' 
elif option == 'episode': path = DataReader.get_experiments_mapping()[experiment] logs", "max_length = max(max_length, len(result[seed])) if len(result) > 0: result = np.stack([np.pad(np.array(result[seed]), (0, max_length", "f: lines = f.readlines() regex = re.compile(\".*step\\s(\\d+).*\\s\\[.*\\π\\:\\s(.*)\\].*\\[.*\\π\\:\\s(.*)\\]\") steps = [] train_losses = []", "DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'policy_networks/' + path[1] + \\ f'-seed={seed}-novalue' + '.log'", "\"\"\" path = DataReader.get_training_log_file(experiment, seed) with open(path, 'r') as f: lines = f.readlines()", "with open(image_path, 'rb') as f: images.append(f.read()) return images @staticmethod def get_gradients(experiment, seed, checkpoint,", "all episodes for given model experiment, seed, and checkpoint have to be passed.", "= 'experiments_mapping.json' @staticmethod @lru_cache(maxsize=1) def get_experiments_mapping(): \"\"\"Reads the experiments mapping from a json", "model experiment, seed, and checkpoint have to be passed. 
\"\"\" if option ==", "seed, step): \"\"\"Gets a 0-1 array of shape (episodes) where episodes is the", "costs for given evaluation \"\"\" costs = DataReader.get_model_costs(experiment, seed, checkpoint) if costs is", "+= one_hot return result @staticmethod def get_episode_speeds(experiment, seed, checkpoint, episode): \"\"\" Returns an", "== 'episode' - returns all episodes for given model experiment, seed, and checkpoint", "+ f'-seed={seed}-novaluestep{checkpoint}.model/ep*') regexp = r'model/ep(\\d+)' values = [] for log in logs: m", "\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images = sorted(glob(gradient_paths))", "glob import glob import torch import pandas import re import json from functools", "= list(map(lambda x: x[-1], episode_states)) episode_states = torch.stack(episode_states) result.append(episode_states[:, 2:].norm(dim=1)) # is it", "seed, step): \"\"\"get the success rate for a given model\"\"\" log_file = DataReader.get_evaluation_log_file(experiment,", "total losses for a given experiment and seed. \"\"\" path = DataReader.get_training_log_file(experiment, seed)", "= np.array(steps) return steps, result else: return None, None @staticmethod def get_learning_curves_for_seed(experiment, seed):", "episode failed, and 1 otherwise. 
\"\"\" successes = DataReader.get_episodes_with_outcome(experiment, seed, step, 1) successes", "open(image_path, 'rb') as f: images.append(f.read()) return images @staticmethod def get_last_gradient(experiment, seed, checkpoint, episode):", "len(result[seed])) max_length = max(max_length, len(result[seed])) if len(result) > 0: result = np.stack([np.pad(np.array(result[seed]), (0,", "image_paths = f'{path}/planning_results/videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/ego/*.png' images = [] for image_path in sorted(glob(image_paths)): with open(image_path, 'rb')", "json.load(f) return x @staticmethod def get_images(experiment, seed, checkpoint, episode): \"\"\"Get simulator images for", "= DataReader.get_experiments_mapping()[experiment][1] gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images = [] for image_path in sorted(glob(gradient_paths)): with", "[] checkpoints = DataReader.find_option_values( 'checkpoint', experiment, seed) if len(steps) < len(checkpoints): steps =", "else: return (value, x, y) @staticmethod def get_evaluation_log_file(experiment, seed, step): \"\"\"Retuns a path", "DataReader.find_option_values( 'checkpoint', experiment, seed) if len(steps) < len(checkpoints): steps = checkpoints for checkpoint", "\"\"\"Retuns a path to the eval logs for given model\"\"\" path = DataReader.get_experiments_mapping()[experiment]", "+ 'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{step}' + '.model.log' paths = glob(regex) assert", "for given model\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'policy_networks/' + path[1]", "in lines: match = regex.match(line) if match: if int(match.group(2)) == outcome: result.append(int(match.group(1))) return", "\"\"\" Returns an array of states for given model for all episodes\"\"\" path", "evaluation on a given episode\"\"\" path = 
DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] gradient_paths =", "f: x = json.load(f) return x @staticmethod def get_images(experiment, seed, checkpoint, episode): \"\"\"Get", "import pandas import re import json from functools import lru_cache import imageio EPISODES", "regex = path[0] + 'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{checkpoint}' + '.model.states' states_paths", "= np.stack([np.pad(np.array(result[seed]), (0, max_length - len(result[seed])), 'edge') for seed in result]) steps =", "for given model for all episodes\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0] +", "simulator images for a given model evaluation on a given episode\"\"\" path =", "@staticmethod def get_episode_costs(experiment, seed, checkpoint, episode): \"\"\" Returns an array of data frames", "= states[i] episode_states = list(map(lambda x: x[-1], episode_states)) episode_states = torch.stack(episode_states) result.append(episode_states) return", "episodes. \"\"\" path = DataReader.get_evaluation_log_file(experiment, seed, step) with open(path, 'r') as f: lines", "model evaluation on a given episode\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] image_paths", "seeds = DataReader.find_option_values('seed', experiment) result = np.zeros(EPISODES) for seed in seeds: checkpoints =", "\"\"\" if option == 'seed': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/'", "i in range(len(states)): episode_states = states[i] episode_states = list(map(lambda x: x[-1], episode_states)) episode_states", "[] validation_losses = [] for line in lines: match = regex.match(line) if match:", "lru_cache import imageio EPISODES = 561 class DataReader: \"\"\"Container class for the static", "in the gradient image. 
\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] gradient_paths =", "the data about experiments. This includes reading logs to parse success cases, reading", "= [] min_length = 100 max_length = 0 train = {} validation =", "def get_model_costs(experiment, seed, checkpoint): \"\"\" Returns an array of costs for given model", "{} steps = [] min_length = 100 max_length = 0 for seed in", "= re.compile(\".*step\\s(\\d+).*\\s\\[.*\\π\\:\\s(.*)\\].*\\[.*\\π\\:\\s(.*)\\]\") steps = [] train_losses = [] validation_losses = [] for line", "DataFrame, one per episode costs = [pandas.DataFrame(cost if type(cost) == type([]) else cost.tolist())", "'experiments_mapping.json' @staticmethod @lru_cache(maxsize=1) def get_experiments_mapping(): \"\"\"Reads the experiments mapping from a json file", "for selected option. Depending on option, returns: if option == 'seed' - returns", "-= middle_x y = mx_index % image.shape[1] y -= middle_y if value ==", "[] validation_means = [] validation_stds = [] for key in train: train_means.append(float(np.mean(train[key]))) train_stds.append(float(np.std(train[key])))", "@staticmethod def get_learning_curves_for_seed(experiment, seed): \"\"\"Gets the training and validation total losses for a", "= [] min_length = 100 max_length = 0 for seed in seeds: result[seed]", "success = DataReader.get_episodes_with_outcome(experiment, seed, checkpoint, 1) success = np.array(success) success = success -", "of speeds for given model and given episode\"\"\" return DataReader.get_model_speeds(experiment, seed, checkpoint)[episode -", "states_path = states_paths[0] states = torch.load(states_path) result = [] for i in range(len(states)):", "and speed. \"\"\" import numpy as np from glob import glob import torch", "to be passed. 
if option == 'episode' - returns all episodes for given", "path[1] + f'-seed={seed}-novaluestep{checkpoint}.model/ep*') regexp = r'model/ep(\\d+)' values = [] for log in logs:", "a path to the eval logs for given model\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex", "(episodes) where episodes is the number of episodes, where Ith value is the", "of 1, and is equal to {paths}' return paths[0] @staticmethod def get_training_log_file(experiment, seed):", "@staticmethod @lru_cache(maxsize=100) def find_option_values(option, experiment=None, seed=None, checkpoint=None): \"\"\"Returns possible values for selected option.", "the ith episode failed, and 1 otherwise. \"\"\" successes = DataReader.get_episodes_with_outcome(experiment, seed, step,", "step are generated for seeds values = list(set(values)) values.sort() return values @staticmethod def", "and given episode\"\"\" return DataReader.get_model_speeds(experiment, seed, checkpoint)[episode - 1] @staticmethod def get_episode_costs(experiment, seed,", "= path[0] + 'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{step}' + '.model.log' paths =", "m.group(1) values.append(int(result)) else: print(f'{log} doesn\\'t contain {option}') # log files for each step", "and episode Returns: (value, x, y) - tuple, where value is the max", "= {} steps = [] min_length = 100 max_length = 0 for seed", "'.model.log' paths = glob(regex) assert len(paths) == 1, \\ f'paths for {regex} is", "= states[i] episode_states = list(map(lambda x: x[-1], episode_states)) episode_states = torch.stack(episode_states) result.append(episode_states[:, 2:].norm(dim=1))", "value = image.flatten()[mx_index] middle_x = image.shape[0] / 2 middle_y = image.shape[1] / 2", "= DataReader.get_evaluation_log_file(experiment, seed, step) with open(path, 'r') as f: lines = f.readlines() regex", "x = json.load(f) return x @staticmethod def get_images(experiment, seed, checkpoint, episode): \"\"\"Get simulator", "experiment, seed) for 
checkpoint in checkpoints: success = DataReader.get_episodes_with_outcome(experiment, seed, checkpoint, 1) success", "episode Returns: (value, x, y) - tuple, where value is the max value", "with open(log_file, 'r') as f: last_line = f.readlines()[-1] last_colon = last_line.rfind(':') success_rate =", "episode): \"\"\"Get gradients for a given model evaluation on a given episode\"\"\" path", "sorted(glob(gradient_paths)): with open(image_path, 'rb') as f: images.append(f.read()) return images @staticmethod def get_last_gradient(experiment, seed,", "= DataReader.get_episodes_with_outcome(experiment, seed, checkpoint, 1) success = np.array(success) success = success - 1", "is the max value of the gradient, x, y are the location of", "0: print( f'costs_paths for {regex} is {costs_paths} and it\\'s length is not 1')", "with open(DataReader.EXPERIMENTS_MAPPING_FILE, 'r') as f: x = json.load(f) return x @staticmethod def get_images(experiment,", "return None @staticmethod @lru_cache(maxsize=10) def get_model_costs(experiment, seed, checkpoint): \"\"\" Returns an array of", "y -= middle_y if value == 0: return (0, 0, 0) else: return", "outcome == 0, returns failing episodes. 
\"\"\" path = DataReader.get_evaluation_log_file(experiment, seed, step) with", "def get_episode_costs(experiment, seed, checkpoint, episode): \"\"\" Returns an array of data frames with", "1) success = np.array(success) success = success - 1 one_hot = np.zeros((len(success), EPISODES))", "array of speeds for given model and given episode\"\"\" return DataReader.get_model_speeds(experiment, seed, checkpoint)[episode", "torch import pandas import re import json from functools import lru_cache import imageio", "it correct return result @staticmethod @lru_cache(maxsize=10) def get_model_states(experiment, seed, checkpoint): \"\"\" Returns an", "get_last_gradient(experiment, seed, checkpoint, episode): \"\"\"Get the last gradient for the model and episode", "seed, checkpoint)[episode - 1] @staticmethod def get_episode_costs(experiment, seed, checkpoint, episode): \"\"\" Returns an", "len(checkpoints): steps = checkpoints for checkpoint in checkpoints: success = DataReader.get_success_rate( experiment, seed,", "experiment that succeeded in this episode. \"\"\" seeds = DataReader.find_option_values('seed', experiment) result =", "functools import lru_cache import imageio EPISODES = 561 class DataReader: \"\"\"Container class for", "for image_path in sorted(glob(image_paths)): with open(image_path, 'rb') as f: images.append(f.read()) return images @staticmethod", "path to the eval logs for given model\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex =", "validation_stds.append(float(np.std(validation[key]))) result = dict( steps=list(train.keys()), train=(train_means, train_stds), validation=(validation_means, validation_stds), ) return result @staticmethod", "returns successful episodes, if outcome == 0, returns failing episodes. \"\"\" path =", "and seed. 
\"\"\" path = DataReader.get_training_log_file(experiment, seed) with open(path, 'r') as f: lines", "max(max_length, len(result[seed])) if len(result) > 0: result = np.stack([np.pad(np.array(result[seed]), (0, max_length - len(result[seed])),", "of costs for given model for all episodes\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex =", "the costs for given evaluation \"\"\" costs = DataReader.get_model_costs(experiment, seed, checkpoint) if costs", "result[seed] = [] curves = DataReader.get_learning_curves_for_seed(experiment, seed) for i, step in enumerate(curves['steps']): train.setdefault(step,", "imageio.imread(image_path) mx_index = np.argmax(image) value = image.flatten()[mx_index] middle_x = image.shape[0] / 2 middle_y", "success = np.array(success) success = success - 1 one_hot = np.zeros((len(success), EPISODES)) one_hot[np.arange(len(success)),", "DataReader.get_model_speeds(experiment, seed, checkpoint)[episode - 1] @staticmethod def get_episode_costs(experiment, seed, checkpoint, episode): \"\"\" Returns", "a given experiment, for all episodes checks performance of all the models with", "seed, checkpoint): \"\"\" Returns an array of states for given model for all", "path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images = sorted(glob(gradient_paths)) if", "image_path in sorted(glob(image_paths)): with open(image_path, 'rb') as f: images.append(f.read()) return images @staticmethod def", "regex = path[0] + 'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{step}' + '.model.log' paths", "paths = glob(regex) assert len(paths) == 1, \\ f'paths for {regex} is not", "f: last_line = f.readlines()[-1] last_colon = last_line.rfind(':') success_rate = float(last_line[(last_colon + 2):]) return", "'planning_results/' + 
path[1] + f'-seed={seed}' + '*.model.log') regexp = r'-novaluestep(\\d+)\\.' elif option ==", "2):]) return success_rate @staticmethod def get_success_rates_for_experiment(experiment): \"\"\"get success rate arrays for each seed", "validation total losses for a given experiment and seed. \"\"\" path = DataReader.get_training_log_file(experiment,", "return result @staticmethod def get_episodes_success_counts(experiment): \"\"\"For a given experiment, for all episodes checks", "experiment) result = {} steps = [] min_length = 100 max_length = 0", "that succeeded in this episode. \"\"\" seeds = DataReader.find_option_values('seed', experiment) result = np.zeros(EPISODES)", "steps = [] min_length = 100 max_length = 0 train = {} validation", "DataReader.get_success_rate( experiment, seed, checkpoint) result[seed].append(success) min_length = min(min_length, len(result[seed])) max_length = max(max_length, len(result[seed]))", "the number of seeds, and checkpints is the number of checkpoints. \"\"\" seeds", "return images @staticmethod def get_gradients(experiment, seed, checkpoint, episode): \"\"\"Get gradients for a given", "seed have to be passed. 
if option == 'episode' - returns all episodes", "(0, max_length - len(result[seed])), 'edge') for seed in result]) steps = np.array(steps) return", "and checkpoints, and returns an array of shape (episodes) where episodes is the", "images @staticmethod def get_gradients(experiment, seed, checkpoint, episode): \"\"\"Get gradients for a given model", "checkpoint): \"\"\" Returns an array of speeds for given model for all episodes\"\"\"", "+ '.model.costs' costs_paths = glob(regex) if len(costs_paths) == 0: print( f'costs_paths for {regex}", "= f.readlines() regex = re.compile(\".*step\\s(\\d+).*\\s\\[.*\\π\\:\\s(.*)\\].*\\[.*\\π\\:\\s(.*)\\]\") steps = [] train_losses = [] validation_losses =", "= DataReader.find_option_values( 'checkpoint', experiment, seed) if len(steps) < len(checkpoints): steps = checkpoints for", "get_episode_speeds(experiment, seed, checkpoint, episode): \"\"\" Returns an array of speeds for given model", "for given model\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'planning_results/' + path[1]", "have to be passed. 
\"\"\" if option == 'seed': path = DataReader.get_experiments_mapping()[experiment] logs", "eval logs for given model\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'planning_results/'", "\"\"\"get the success rate for a given model\"\"\" log_file = DataReader.get_evaluation_log_file(experiment, seed, step)", "{regex} is not length of 1, and is equal to {paths}' return paths[0]", "open(path, 'r') as f: lines = f.readlines() regex = re.compile(\".*ep:\\s+(\\d+).*\\|\\ssuccess:\\s+(\\d).*\") result = []", "for all episodes checks performance of all the models with all possible seeds", "validation = {} for seed in seeds: result[seed] = [] curves = DataReader.get_learning_curves_for_seed(experiment,", "result @staticmethod @lru_cache(maxsize=10) def get_model_states(experiment, seed, checkpoint): \"\"\" Returns an array of states", "json file EXPERIMENTS_MAPPING_FILE \"\"\" with open(DataReader.EXPERIMENTS_MAPPING_FILE, 'r') as f: x = json.load(f) return", "line in lines: match = regex.match(line) if match: if int(match.group(2)) == outcome: result.append(int(match.group(1)))", "{regex} is {states_paths} and it\\'s length is not 1' states_path = states_paths[0] states", "= regex.match(line) if match: if int(match.group(2)) == outcome: result.append(int(match.group(1))) return result @staticmethod def", "contain {option}') # log files for each step are generated for seeds values", "a given model. 
If outcome == 1, returns successful episodes, if outcome ==", "images = [] for image_path in sorted(glob(gradient_paths)): with open(image_path, 'rb') as f: images.append(f.read())", "values.append(int(result)) else: print(f'{log} doesn\\'t contain {option}') # log files for each step are", "= DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'policy_networks/' + path[1] + \\ f'-seed={seed}-novalue' +", "seeds = DataReader.find_option_values('seed', experiment) result = {} steps = [] min_length = 100", "is it correct return result @staticmethod @lru_cache(maxsize=10) def get_model_states(experiment, seed, checkpoint): \"\"\" Returns", "path = DataReader.get_evaluation_log_file(experiment, seed, step) with open(path, 'r') as f: lines = f.readlines()", "data about experiments. This includes reading logs to parse success cases, reading images,", "assert len(states_paths) == 1, \\ f'states_paths for {regex} is {states_paths} and it\\'s length", "0, 0) else: return (value, x, y) @staticmethod def get_evaluation_log_file(experiment, seed, step): \"\"\"Retuns", "= DataReader.get_experiments_mapping()[experiment][1] image_paths = f'{path}/planning_results/videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/ego/*.png' images = [] for image_path in sorted(glob(image_paths)): with", "(0, 0, 0) else: return (value, x, y) @staticmethod def get_evaluation_log_file(experiment, seed, step):", "for each seed for the given experiment across all checkpoints. The resulting shape", "validation_means = [] validation_stds = [] for key in train: train_means.append(float(np.mean(train[key]))) train_stds.append(float(np.std(train[key]))) validation_means.append(float(np.mean(validation[key])))", "= 1 one_hot = np.sum(one_hot, axis=0), one_hot = np.squeeze(one_hot) result += one_hot return", "import lru_cache import imageio EPISODES = 561 class DataReader: \"\"\"Container class for the", "returns all checkpoints for given experiment and seed. 
experiment and seed have to", "f'costs_paths for {regex} is {costs_paths} and it\\'s length is not 1') return None", "with open(path, 'r') as f: lines = f.readlines() regex = re.compile(\".*step\\s(\\d+).*\\s\\[.*\\π\\:\\s(.*)\\].*\\[.*\\π\\:\\s(.*)\\]\") steps =", "for {regex} is {costs_paths} and it\\'s length is not 1') return None else:", "result @staticmethod def get_episode_success_map(experiment, seed, step): \"\"\"Gets a 0-1 array of shape (episodes)", "to parse success cases, reading images, costs and speed. \"\"\" import numpy as", "1 return result @staticmethod def get_episodes_success_counts(experiment): \"\"\"For a given experiment, for all episodes", "dict( steps=steps, train_losses=train_losses, validation_losses=validation_losses, ) return result @staticmethod def get_learning_curves_for_experiment(experiment): seeds = DataReader.find_option_values('seed',", "= DataReader.find_option_values( 'checkpoint', experiment, seed) for checkpoint in checkpoints: success = DataReader.get_episodes_with_outcome(experiment, seed,", "per episode costs = [pandas.DataFrame(cost if type(cost) == type([]) else cost.tolist()) for cost", "= image.shape[0] / 2 middle_y = image.shape[1] / 2 x = mx_index //", "match: steps.append(int(match.group(1))) train_losses.append(float(match.group(2))) validation_losses.append(float(match.group(3))) result = dict( steps=steps, train_losses=train_losses, validation_losses=validation_losses, ) return result", "not 1') return None else: raw_costs = torch.load(costs_paths[0]) # list of DataFrame, one", "= f'{path}/planning_results/videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/ego/*.png' images = [] for image_path in sorted(glob(image_paths)): with open(image_path, 'rb') as", "= [] for image_path in sorted(glob(gradient_paths)): with open(image_path, 'rb') as f: images.append(f.read()) return", "glob(regex) if len(costs_paths) == 0: print( f'costs_paths for {regex} is {costs_paths} and it\\'s", 
"regexp = r\"seed=(\\d+)-\" elif option == 'checkpoint': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0]", "\"\"\"Get simulator images for a given model evaluation on a given episode\"\"\" path", "checkpoint) result[seed].append(success) min_length = min(min_length, len(result[seed])) max_length = max(max_length, len(result[seed])) if len(result) >", "= DataReader.find_option_values('seed', experiment) result = {} steps = [] min_length = 100 max_length", "= [] for i in range(len(states)): episode_states = states[i] episode_states = list(map(lambda x:", "re.compile(\".*ep:\\s+(\\d+).*\\|\\ssuccess:\\s+(\\d).*\") result = [] for line in lines: match = regex.match(line) if match:", "sorted(glob(gradient_paths))[-1] image = imageio.imread(image_path) mx_index = np.argmax(image) value = image.flatten()[mx_index] middle_x = image.shape[0]", "images = [] for image_path in sorted(glob(image_paths)): with open(image_path, 'rb') as f: images.append(f.read())", "given model experiment, seed, and checkpoint have to be passed. \"\"\" if option", "states_paths = glob(regex) assert len(states_paths) == 1, \\ f'states_paths for {regex} is {states_paths}", "== outcome: result.append(int(match.group(1))) return result @staticmethod def get_episode_success_map(experiment, seed, step): \"\"\"Gets a 0-1", "log files for each step are generated for seeds values = list(set(values)) values.sort()", "in seeds: result[seed] = [] checkpoints = DataReader.find_option_values( 'checkpoint', experiment, seed) if len(steps)", "open(image_path, 'rb') as f: images.append(f.read()) return images @staticmethod def get_gradients(experiment, seed, checkpoint, episode):", "of speeds for given model for all episodes\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex =", "static methods which can be used to access the data about experiments. 
This", "for all episodes\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'planning_results/' + path[1]", "on a given episode\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] image_paths = f'{path}/planning_results/videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/ego/*.png'", "is not None: return costs[episode - 1] else: return None @staticmethod @lru_cache(maxsize=10) def", "open(DataReader.EXPERIMENTS_MAPPING_FILE, 'r') as f: x = json.load(f) return x @staticmethod def get_images(experiment, seed,", "get_episode_success_map(experiment, seed, step): \"\"\"Gets a 0-1 array of shape (episodes) where episodes is", "costs is not None: return costs[episode - 1] else: return None @staticmethod @lru_cache(maxsize=10)", "data access methods\"\"\" EXPERIMENTS_MAPPING_FILE = 'experiments_mapping.json' @staticmethod @lru_cache(maxsize=1) def get_experiments_mapping(): \"\"\"Reads the experiments", "glob import torch import pandas import re import json from functools import lru_cache", "checkpoint=None): \"\"\"Returns possible values for selected option. Depending on option, returns: if option", "+ 'planning_results/' + path[1] + '*.log') regexp = r\"seed=(\\d+)-\" elif option == 'checkpoint':", "f'-seed={seed}' + '*.model.log') regexp = r'-novaluestep(\\d+)\\.' elif option == 'episode': path = DataReader.get_experiments_mapping()[experiment]", "-= middle_y if value == 0: return (0, 0, 0) else: return (value,", "states = torch.load(states_path) result = [] for i in range(len(states)): episode_states = states[i]", "def get_episode_speeds(experiment, seed, checkpoint, episode): \"\"\" Returns an array of speeds for given", "for given experiment. experiment has to passed. 
if option == 'checkpoint' - returns", "path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images = [] for", "1] else: return None @staticmethod @lru_cache(maxsize=10) def get_model_costs(experiment, seed, checkpoint): \"\"\" Returns an", "key in train: train_means.append(float(np.mean(train[key]))) train_stds.append(float(np.std(train[key]))) validation_means.append(float(np.mean(validation[key]))) validation_stds.append(float(np.std(validation[key]))) result = dict( steps=list(train.keys()), train=(train_means, train_stds),", "np.zeros(EPISODES) result[successes] = 1 return result @staticmethod def get_episodes_success_counts(experiment): \"\"\"For a given experiment,", "DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/' + path[1] + f'-seed={seed}' + '*.model.log') regexp", "for i, step in enumerate(curves['steps']): train.setdefault(step, []).append(curves['train_losses'][i]) validation.setdefault(step, []).append(curves['validation_losses'][i]) train_means = [] train_stds", "@lru_cache(maxsize=10) def get_model_speeds(experiment, seed, checkpoint): \"\"\" Returns an array of speeds for given", "EPISODES = 561 class DataReader: \"\"\"Container class for the static data access methods\"\"\"", "Returns an array of speeds for given model and given episode\"\"\" return DataReader.get_model_speeds(experiment,", "open(path, 'r') as f: lines = f.readlines() regex = re.compile(\".*step\\s(\\d+).*\\s\\[.*\\π\\:\\s(.*)\\].*\\[.*\\π\\:\\s(.*)\\]\") steps = []", "= np.array(successes) - 1 result = np.zeros(EPISODES) result[successes] = 1 return result @staticmethod", "elif option == 'episode': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/videos_simulator/' +", "result = 
m.group(1) values.append(int(result)) else: print(f'{log} doesn\\'t contain {option}') # log files for", "for seeds values = list(set(values)) values.sort() return values @staticmethod def get_success_rate(experiment, seed, step):", "in the result is 0 if the ith episode failed, and 1 otherwise.", "step) with open(log_file, 'r') as f: last_line = f.readlines()[-1] last_colon = last_line.rfind(':') success_rate", "def get_images(experiment, seed, checkpoint, episode): \"\"\"Get simulator images for a given model evaluation", "has to passed. if option == 'checkpoint' - returns all checkpoints for given", "return x @staticmethod def get_images(experiment, seed, checkpoint, episode): \"\"\"Get simulator images for a", "Returns: (value, x, y) - tuple, where value is the max value of", "path[0] + 'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{checkpoint}' + '.model.states' states_paths = glob(regex)", "if m: result = m.group(1) values.append(int(result)) else: print(f'{log} doesn\\'t contain {option}') # log", "checkpoints. \"\"\" seeds = DataReader.find_option_values('seed', experiment) result = {} steps = [] min_length", "in sorted(glob(gradient_paths)): with open(image_path, 'rb') as f: images.append(f.read()) return images @staticmethod def get_last_gradient(experiment,", "def get_last_gradient(experiment, seed, checkpoint, episode): \"\"\"Get the last gradient for the model and", "seed, step, 1) successes = np.array(successes) - 1 result = np.zeros(EPISODES) result[successes] =", "seed, step, outcome): \"\"\"Gets episodes with given outcome for a given model. If", "lines: match = regex.match(line) if match: if int(match.group(2)) == outcome: result.append(int(match.group(1))) return result", "def get_learning_curves_for_seed(experiment, seed): \"\"\"Gets the training and validation total losses for a given", "logs to parse success cases, reading images, costs and speed. 
\"\"\" import numpy", "paths[0] @staticmethod @lru_cache(maxsize=100) def find_option_values(option, experiment=None, seed=None, checkpoint=None): \"\"\"Returns possible values for selected", "\\ f'paths for {regex} is not length of 1, and is equal to", "checkpoints for given experiment and seed. experiment and seed have to be passed.", "= np.squeeze(one_hot) result += one_hot return result @staticmethod def get_episode_speeds(experiment, seed, checkpoint, episode):", "0 for seed in seeds: result[seed] = [] checkpoints = DataReader.find_option_values( 'checkpoint', experiment,", "len(result[seed])), 'edge') for seed in result]) steps = np.array(steps) return steps, result else:", "checkpoints = DataReader.find_option_values( 'checkpoint', experiment, seed) if len(steps) < len(checkpoints): steps = checkpoints", "1) successes = np.array(successes) - 1 result = np.zeros(EPISODES) result[successes] = 1 return", "eval logs for given model\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'policy_networks/'", "one_hot = np.zeros((len(success), EPISODES)) one_hot[np.arange(len(success)), success] = 1 one_hot = np.sum(one_hot, axis=0), one_hot", "Returns an array of speeds for given model for all episodes\"\"\" path =", "an array of states for given model for all episodes\"\"\" path = DataReader.get_experiments_mapping()[experiment]", "for each step are generated for seeds values = list(set(values)) values.sort() return values", "0: return (0, 0, 0) else: return (value, x, y) @staticmethod def get_evaluation_log_file(experiment,", "number of episodes. Ith value in the result is 0 if the ith", "if outcome == 0, returns failing episodes. \"\"\" path = DataReader.get_evaluation_log_file(experiment, seed, step)", "value in the gradient image. 
\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] gradient_paths", "get_evaluation_log_file(experiment, seed, step): \"\"\"Retuns a path to the eval logs for given model\"\"\"", "train_losses=train_losses, validation_losses=validation_losses, ) return result @staticmethod def get_learning_curves_for_experiment(experiment): seeds = DataReader.find_option_values('seed', experiment) result", "includes reading logs to parse success cases, reading images, costs and speed. \"\"\"", "def get_training_log_file(experiment, seed): \"\"\"Retuns a path to the eval logs for given model\"\"\"", "files for each step are generated for seeds values = list(set(values)) values.sort() return", "np.zeros((len(success), EPISODES)) one_hot[np.arange(len(success)), success] = 1 one_hot = np.sum(one_hot, axis=0), one_hot = np.squeeze(one_hot)", "experiment and seed have to be passed. if option == 'episode' - returns", "torch.load(costs_paths[0]) # list of DataFrame, one per episode costs = [pandas.DataFrame(cost if type(cost)", "= 0 for seed in seeds: result[seed] = [] checkpoints = DataReader.find_option_values( 'checkpoint',", "value in the result is 0 if the ith episode failed, and 1", "< len(checkpoints): steps = checkpoints for checkpoint in checkpoints: success = DataReader.get_success_rate( experiment,", "is the number of checkpoints. \"\"\" seeds = DataReader.find_option_values('seed', experiment) result = {}", "match = regex.match(line) if match: steps.append(int(match.group(1))) train_losses.append(float(match.group(2))) validation_losses.append(float(match.group(3))) result = dict( steps=steps, train_losses=train_losses,", "and seed have to be passed. 
if option == 'episode' - returns all", "torch.load(states_path) result = [] for i in range(len(states)): episode_states = states[i] episode_states =", "0) else: return (value, x, y) @staticmethod def get_evaluation_log_file(experiment, seed, step): \"\"\"Retuns a", "in this episode. \"\"\" seeds = DataReader.find_option_values('seed', experiment) result = np.zeros(EPISODES) for seed", "be passed. \"\"\" if option == 'seed': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0]", "to {paths}' return paths[0] @staticmethod @lru_cache(maxsize=100) def find_option_values(option, experiment=None, seed=None, checkpoint=None): \"\"\"Returns possible", "= mx_index % image.shape[1] y -= middle_y if value == 0: return (0,", "\"\"\" with open(DataReader.EXPERIMENTS_MAPPING_FILE, 'r') as f: x = json.load(f) return x @staticmethod def", "'r') as f: lines = f.readlines() regex = re.compile(\".*step\\s(\\d+).*\\s\\[.*\\π\\:\\s(.*)\\].*\\[.*\\π\\:\\s(.*)\\]\") steps = [] train_losses", "if match: if int(match.group(2)) == outcome: result.append(int(match.group(1))) return result @staticmethod def get_episode_success_map(experiment, seed,", "for given experiment and seed. experiment and seed have to be passed. 
if", "each step are generated for seeds values = list(set(values)) values.sort() return values @staticmethod", "lines = f.readlines() regex = re.compile(\".*step\\s(\\d+).*\\s\\[.*\\π\\:\\s(.*)\\].*\\[.*\\π\\:\\s(.*)\\]\") steps = [] train_losses = [] validation_losses", "return (value, x, y) @staticmethod def get_evaluation_log_file(experiment, seed, step): \"\"\"Retuns a path to", "= path[0] + 'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{checkpoint}' + '.model.states' states_paths =", "seed, checkpoint, episode): \"\"\"Get gradients for a given model evaluation on a given", "given episode\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] image_paths = f'{path}/planning_results/videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/ego/*.png' images =", "steps=steps, train_losses=train_losses, validation_losses=validation_losses, ) return result @staticmethod def get_learning_curves_for_experiment(experiment): seeds = DataReader.find_option_values('seed', experiment)", "= last_line.rfind(':') success_rate = float(last_line[(last_colon + 2):]) return success_rate @staticmethod def get_success_rates_for_experiment(experiment): \"\"\"get", "is the number of seeds, and checkpints is the number of checkpoints. \"\"\"", "max_length - len(result[seed])), 'edge') for seed in result]) steps = np.array(steps) return steps,", "the location of this max value in the gradient image. 
\"\"\" path =", "= states_paths[0] states = torch.load(states_path) result = [] for i in range(len(states)): episode_states", "result = np.stack([np.pad(np.array(result[seed]), (0, max_length - len(result[seed])), 'edge') for seed in result]) steps", "seed) for i, step in enumerate(curves['steps']): train.setdefault(step, []).append(curves['train_losses'][i]) validation.setdefault(step, []).append(curves['validation_losses'][i]) train_means = []", "return result @staticmethod def get_episodes_with_outcome(experiment, seed, step, outcome): \"\"\"Gets episodes with given outcome", "class with static methods which can be used to access the data about", "/ 2 x = mx_index // image.shape[1] x -= middle_x y = mx_index", "the training and validation total losses for a given experiment and seed. \"\"\"", "seed, checkpoint, episode): \"\"\" Returns an array of speeds for given model and", "array of costs for given model for all episodes\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex", "= [] for key in train: train_means.append(float(np.mean(train[key]))) train_stds.append(float(np.std(train[key]))) validation_means.append(float(np.mean(validation[key]))) validation_stds.append(float(np.std(validation[key]))) result = dict(", "have to be passed. 
if option == 'episode' - returns all episodes for", "= [] train_stds = [] validation_means = [] validation_stds = [] for key", "\"\"\"Container class for the static data access methods\"\"\" EXPERIMENTS_MAPPING_FILE = 'experiments_mapping.json' @staticmethod @lru_cache(maxsize=1)", "m: result = m.group(1) values.append(int(result)) else: print(f'{log} doesn\\'t contain {option}') # log files", "def get_model_states(experiment, seed, checkpoint): \"\"\" Returns an array of states for given model", "Returns an array of states for given model for all episodes\"\"\" path =", "the last gradient for the model and episode Returns: (value, x, y) -", "checkpoints, and returns an array of shape (episodes) where episodes is the number", "one_hot return result @staticmethod def get_episode_speeds(experiment, seed, checkpoint, episode): \"\"\" Returns an array", "if option == 'seed': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/' +", "= f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images = sorted(glob(gradient_paths)) if len(images) == 0: return (0, 0, 0)", "@staticmethod def get_episode_success_map(experiment, seed, step): \"\"\"Gets a 0-1 array of shape (episodes) where", "[] for key in train: train_means.append(float(np.mean(train[key]))) train_stds.append(float(np.std(train[key]))) validation_means.append(float(np.mean(validation[key]))) validation_stds.append(float(np.std(validation[key]))) result = dict( steps=list(train.keys()),", "gradient, x, y are the location of this max value in the gradient", "episode_states = list(map(lambda x: x[-1], episode_states)) episode_states = torch.stack(episode_states) result.append(episode_states[:, 2:].norm(dim=1)) # is", "the gradient image. 
\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png'", "else: print(f'{log} doesn\\'t contain {option}') # log files for each step are generated", "import imageio EPISODES = 561 class DataReader: \"\"\"Container class for the static data", "+ \\ f'-seed={seed}-novaluestep{step}' + '.model.log' paths = glob(regex) assert len(paths) == 1, \\", "result[successes] = 1 return result @staticmethod def get_episodes_success_counts(experiment): \"\"\"For a given experiment, for", "= checkpoints for checkpoint in checkpoints: success = DataReader.get_success_rate( experiment, seed, checkpoint) result[seed].append(success)", "parse success cases, reading images, costs and speed. \"\"\" import numpy as np", "images @staticmethod def get_last_gradient(experiment, seed, checkpoint, episode): \"\"\"Get the last gradient for the", "episodes is the number of episodes. Ith value in the result is 0", "which can be used to access the data about experiments. This includes reading", "seed for the given experiment across all checkpoints. 
The resulting shape of the", "resulting shape of the np array is (seeds, checkpoints), where seeds is the", "on option, returns: if option == 'seed' - returns all seeds for given", "steps = [] train_losses = [] validation_losses = [] for line in lines:", "for given model and given episode\"\"\" return DataReader.get_model_speeds(experiment, seed, checkpoint)[episode - 1] @staticmethod", "seed in result]) steps = np.array(steps) return steps, result else: return None, None", "from glob import glob import torch import pandas import re import json from", "f'-seed={seed}-novaluestep{checkpoint}' + '.model.costs' costs_paths = glob(regex) if len(costs_paths) == 0: print( f'costs_paths for", "2 middle_y = image.shape[1] / 2 x = mx_index // image.shape[1] x -=", "'.model.states' states_paths = glob(regex) assert len(states_paths) == 1, \\ f'states_paths for {regex} is", "all the costs for given evaluation \"\"\" costs = DataReader.get_model_costs(experiment, seed, checkpoint) if", "and it\\'s length is not 1') return None else: raw_costs = torch.load(costs_paths[0]) #", "the number of episodes, where Ith value is the number of models in", "success - 1 one_hot = np.zeros((len(success), EPISODES)) one_hot[np.arange(len(success)), success] = 1 one_hot =", "return paths[0] @staticmethod def get_training_log_file(experiment, seed): \"\"\"Retuns a path to the eval logs", "result = np.zeros(EPISODES) result[successes] = 1 return result @staticmethod def get_episodes_success_counts(experiment): \"\"\"For a", "checkpoints. 
The resulting shape of the np array is (seeds, checkpoints), where seeds", "given model\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'planning_results/' + path[1] +", "and returns an array of shape (episodes) where episodes is the number of", "experiment, seed) if len(steps) < len(checkpoints): steps = checkpoints for checkpoint in checkpoints:", "= {} for seed in seeds: result[seed] = [] curves = DataReader.get_learning_curves_for_seed(experiment, seed)", "0) image_path = sorted(glob(gradient_paths))[-1] image = imageio.imread(image_path) mx_index = np.argmax(image) value = image.flatten()[mx_index]", "for seed in seeds: result[seed] = [] checkpoints = DataReader.find_option_values( 'checkpoint', experiment, seed)", "y are the location of this max value in the gradient image. \"\"\"", "for {regex} is {states_paths} and it\\'s length is not 1' states_path = states_paths[0]", "@staticmethod def get_learning_curves_for_experiment(experiment): seeds = DataReader.find_option_values('seed', experiment) result = {} steps = []", "seed, checkpoint, episode): \"\"\"Get simulator images for a given model evaluation on a", "for key in train: train_means.append(float(np.mean(train[key]))) train_stds.append(float(np.std(train[key]))) validation_means.append(float(np.mean(validation[key]))) validation_stds.append(float(np.std(validation[key]))) result = dict( steps=list(train.keys()), train=(train_means,", "+ f'-seed={seed}' + '*.model.log') regexp = r'-novaluestep(\\d+)\\.' 
elif option == 'episode': path =", "[]).append(curves['train_losses'][i]) validation.setdefault(step, []).append(curves['validation_losses'][i]) train_means = [] train_stds = [] validation_means = [] validation_stds", "result else: return None, None @staticmethod def get_learning_curves_for_seed(experiment, seed): \"\"\"Gets the training and", "glob(path[0] + 'planning_results/' + path[1] + '*.log') regexp = r\"seed=(\\d+)-\" elif option ==", "states[i] episode_states = list(map(lambda x: x[-1], episode_states)) episode_states = torch.stack(episode_states) result.append(episode_states[:, 2:].norm(dim=1)) #", "f'-seed={seed}-novalue' + '.log' paths = glob(regex) assert len(paths) == 1, \\ f'paths for", "np from glob import glob import torch import pandas import re import json", "== 'seed': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/' + path[1] +", "r\"seed=(\\d+)-\" elif option == 'checkpoint': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/'", "+ 'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{checkpoint}' + '.model.costs' costs_paths = glob(regex) if", "seed in seeds: result[seed] = [] checkpoints = DataReader.find_option_values( 'checkpoint', experiment, seed) if", "def get_episodes_with_outcome(experiment, seed, step, outcome): \"\"\"Gets episodes with given outcome for a given", "access methods\"\"\" EXPERIMENTS_MAPPING_FILE = 'experiments_mapping.json' @staticmethod @lru_cache(maxsize=1) def get_experiments_mapping(): \"\"\"Reads the experiments mapping", "glob(path[0] + 'planning_results/' + path[1] + f'-seed={seed}' + '*.model.log') regexp = r'-novaluestep(\\d+)\\.' elif", "each seed for the given experiment across all checkpoints. The resulting shape of", "and is equal to {paths}' return paths[0] @staticmethod @lru_cache(maxsize=100) def find_option_values(option, experiment=None, seed=None,", "failed, and 1 otherwise. 
\"\"\" successes = DataReader.get_episodes_with_outcome(experiment, seed, step, 1) successes =", "for image_path in sorted(glob(gradient_paths)): with open(image_path, 'rb') as f: images.append(f.read()) return images @staticmethod", "def get_success_rates_for_experiment(experiment): \"\"\"get success rate arrays for each seed for the given experiment", "DataReader.get_experiments_mapping()[experiment][1] gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images = sorted(glob(gradient_paths)) if len(images) == 0: return (0,", "sorted(glob(image_paths)): with open(image_path, 'rb') as f: images.append(f.read()) return images @staticmethod def get_gradients(experiment, seed,", "path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/' + path[1] + '*.log') regexp", "checkpoints = DataReader.find_option_values( 'checkpoint', experiment, seed) for checkpoint in checkpoints: success = DataReader.get_episodes_with_outcome(experiment,", "step, 1) successes = np.array(successes) - 1 result = np.zeros(EPISODES) result[successes] = 1", "model and episode Returns: (value, x, y) - tuple, where value is the", "of data frames with all the costs for given evaluation \"\"\" costs =", "+ path[1] + \\ f'-seed={seed}-novaluestep{step}' + '.model.log' paths = glob(regex) assert len(paths) ==", ") return result @staticmethod def get_episodes_with_outcome(experiment, seed, step, outcome): \"\"\"Gets episodes with given", "f.readlines()[-1] last_colon = last_line.rfind(':') success_rate = float(last_line[(last_colon + 2):]) return success_rate @staticmethod def", "[] curves = DataReader.get_learning_curves_for_seed(experiment, seed) for i, step in enumerate(curves['steps']): train.setdefault(step, []).append(curves['train_losses'][i]) validation.setdefault(step,", "else: return None, None @staticmethod def get_learning_curves_for_seed(experiment, seed): 
\"\"\"Gets the training and validation", "result.append(int(match.group(1))) return result @staticmethod def get_episode_success_map(experiment, seed, step): \"\"\"Gets a 0-1 array of", "path[0] + 'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{step}' + '.model.log' paths = glob(regex)", "is the number of episodes. Ith value in the result is 0 if", "\"\"\" seeds = DataReader.find_option_values('seed', experiment) result = np.zeros(EPISODES) for seed in seeds: checkpoints", "and validation total losses for a given experiment and seed. \"\"\" path =", "100 max_length = 0 for seed in seeds: result[seed] = [] checkpoints =", "shape (episodes) where episodes is the number of episodes. Ith value in the", "about experiments. This includes reading logs to parse success cases, reading images, costs", "y = mx_index % image.shape[1] y -= middle_y if value == 0: return", "result = {} steps = [] min_length = 100 max_length = 0 for", "= [] validation_means = [] validation_stds = [] for key in train: train_means.append(float(np.mean(train[key])))", "return result @staticmethod def get_episode_speeds(experiment, seed, checkpoint, episode): \"\"\" Returns an array of", "seed, checkpoint, episode): \"\"\" Returns an array of data frames with all the", "range(len(states)): episode_states = states[i] episode_states = list(map(lambda x: x[-1], episode_states)) episode_states = torch.stack(episode_states)", "result @staticmethod def get_episodes_success_counts(experiment): \"\"\"For a given experiment, for all episodes checks performance", "given experiment and seed. experiment and seed have to be passed. 
if option", "EXPERIMENTS_MAPPING_FILE = 'experiments_mapping.json' @staticmethod @lru_cache(maxsize=1) def get_experiments_mapping(): \"\"\"Reads the experiments mapping from a", "{} validation = {} for seed in seeds: result[seed] = [] curves =", "= DataReader.get_success_rate( experiment, seed, checkpoint) result[seed].append(success) min_length = min(min_length, len(result[seed])) max_length = max(max_length,", "in range(len(states)): episode_states = states[i] episode_states = list(map(lambda x: x[-1], episode_states)) episode_states =", "regex = re.compile(\".*step\\s(\\d+).*\\s\\[.*\\π\\:\\s(.*)\\].*\\[.*\\π\\:\\s(.*)\\]\") steps = [] train_losses = [] validation_losses = [] for", "@lru_cache(maxsize=100) def find_option_values(option, experiment=None, seed=None, checkpoint=None): \"\"\"Returns possible values for selected option. Depending", "def get_success_rate(experiment, seed, step): \"\"\"get the success rate for a given model\"\"\" log_file", "+ path[1] + '*.log') regexp = r\"seed=(\\d+)-\" elif option == 'checkpoint': path =", "== 'checkpoint' - returns all checkpoints for given experiment and seed. 
experiment and", "Depending on option, returns: if option == 'seed' - returns all seeds for", "seed, checkpoint, 1) success = np.array(success) success = success - 1 one_hot =", "DataReader.get_experiments_mapping()[experiment][1] gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images = [] for image_path in sorted(glob(gradient_paths)): with open(image_path,", "\"\"\" Returns an array of data frames with all the costs for given", "len(result) > 0: result = np.stack([np.pad(np.array(result[seed]), (0, max_length - len(result[seed])), 'edge') for seed", "is {states_paths} and it\\'s length is not 1' states_path = states_paths[0] states =", "DataReader: \"\"\"Container class for the static data access methods\"\"\" EXPERIMENTS_MAPPING_FILE = 'experiments_mapping.json' @staticmethod", "(0, 0, 0) image_path = sorted(glob(gradient_paths))[-1] image = imageio.imread(image_path) mx_index = np.argmax(image) value", "where episodes is the number of episodes. Ith value in the result is", "x, y are the location of this max value in the gradient image.", "'r') as f: lines = f.readlines() regex = re.compile(\".*ep:\\s+(\\d+).*\\|\\ssuccess:\\s+(\\d).*\") result = [] for", "in seeds: checkpoints = DataReader.find_option_values( 'checkpoint', experiment, seed) for checkpoint in checkpoints: success", "experiment and seed. \"\"\" path = DataReader.get_training_log_file(experiment, seed) with open(path, 'r') as f:", "all episodes checks performance of all the models with all possible seeds and", "gradient image. 
\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images", "int(match.group(2)) == outcome: result.append(int(match.group(1))) return result @staticmethod def get_episode_success_map(experiment, seed, step): \"\"\"Gets a", "mx_index % image.shape[1] y -= middle_y if value == 0: return (0, 0,", "experiment, seed, checkpoint) result[seed].append(success) min_length = min(min_length, len(result[seed])) max_length = max(max_length, len(result[seed])) if", "train_means.append(float(np.mean(train[key]))) train_stds.append(float(np.std(train[key]))) validation_means.append(float(np.mean(validation[key]))) validation_stds.append(float(np.std(validation[key]))) result = dict( steps=list(train.keys()), train=(train_means, train_stds), validation=(validation_means, validation_stds), )", "number of seeds, and checkpints is the number of checkpoints. \"\"\" seeds =", "episode costs = [pandas.DataFrame(cost if type(cost) == type([]) else cost.tolist()) for cost in", "len(steps) < len(checkpoints): steps = checkpoints for checkpoint in checkpoints: success = DataReader.get_success_rate(", "for {regex} is not length of 1, and is equal to {paths}' return", "> 0: result = np.stack([np.pad(np.array(result[seed]), (0, max_length - len(result[seed])), 'edge') for seed in", "for a given model\"\"\" log_file = DataReader.get_evaluation_log_file(experiment, seed, step) with open(log_file, 'r') as", "+ 2):]) return success_rate @staticmethod def get_success_rates_for_experiment(experiment): \"\"\"get success rate arrays for each", "experiment. experiment has to passed. 
if option == 'checkpoint' - returns all checkpoints", "checkpoint)[episode - 1] @staticmethod def get_episode_costs(experiment, seed, checkpoint, episode): \"\"\" Returns an array", "one per episode costs = [pandas.DataFrame(cost if type(cost) == type([]) else cost.tolist()) for", "elif option == 'checkpoint': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/' +", "= f.readlines()[-1] last_colon = last_line.rfind(':') success_rate = float(last_line[(last_colon + 2):]) return success_rate @staticmethod", "the models with all possible seeds and checkpoints, and returns an array of", "max value in the gradient image. \"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1]", "checkpoints), where seeds is the number of seeds, and checkpints is the number", "- len(result[seed])), 'edge') for seed in result]) steps = np.array(steps) return steps, result", "get_learning_curves_for_experiment(experiment): seeds = DataReader.find_option_values('seed', experiment) result = {} steps = [] min_length =", "images for a given model evaluation on a given episode\"\"\" path = DataReader.get_experiments_mapping()[experiment][0]", "pandas import re import json from functools import lru_cache import imageio EPISODES =", "= float(last_line[(last_colon + 2):]) return success_rate @staticmethod def get_success_rates_for_experiment(experiment): \"\"\"get success rate arrays", "None else: raw_costs = torch.load(costs_paths[0]) # list of DataFrame, one per episode costs", "numpy as np from glob import glob import torch import pandas import re", "1' states_path = states_paths[0] states = torch.load(states_path) result = [] for i in", "the static data access methods\"\"\" EXPERIMENTS_MAPPING_FILE = 'experiments_mapping.json' @staticmethod @lru_cache(maxsize=1) def get_experiments_mapping(): \"\"\"Reads", "a given model\"\"\" log_file = 
DataReader.get_evaluation_log_file(experiment, seed, step) with open(log_file, 'r') as f:", "axis=0), one_hot = np.squeeze(one_hot) result += one_hot return result @staticmethod def get_episode_speeds(experiment, seed,", "of the np array is (seeds, checkpoints), where seeds is the number of", "for line in lines: match = regex.match(line) if match: steps.append(int(match.group(1))) train_losses.append(float(match.group(2))) validation_losses.append(float(match.group(3))) result", "\"\"\" successes = DataReader.get_episodes_with_outcome(experiment, seed, step, 1) successes = np.array(successes) - 1 result", "success = success - 1 one_hot = np.zeros((len(success), EPISODES)) one_hot[np.arange(len(success)), success] = 1", "= DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{checkpoint}' +", "returns: if option == 'seed' - returns all seeds for given experiment. experiment", "checkpoint, episode): \"\"\" Returns an array of speeds for given model and given", "log_file = DataReader.get_evaluation_log_file(experiment, seed, step) with open(log_file, 'r') as f: last_line = f.readlines()[-1]", "x -= middle_x y = mx_index % image.shape[1] y -= middle_y if value", "= image.shape[1] / 2 x = mx_index // image.shape[1] x -= middle_x y", "returns failing episodes. 
\"\"\" path = DataReader.get_evaluation_log_file(experiment, seed, step) with open(path, 'r') as", "= [] for line in lines: match = regex.match(line) if match: if int(match.group(2))", "if len(costs_paths) == 0: print( f'costs_paths for {regex} is {costs_paths} and it\\'s length", "== 'episode': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/videos_simulator/' + path[1] +", "+ \\ f'-seed={seed}-novalue' + '.log' paths = glob(regex) assert len(paths) == 1, \\", "'.model.costs' costs_paths = glob(regex) if len(costs_paths) == 0: print( f'costs_paths for {regex} is", "methods which can be used to access the data about experiments. This includes", "DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images = [] for image_path in", "seeds for given experiment. experiment has to passed. if option == 'checkpoint' -", "= np.zeros(EPISODES) result[successes] = 1 return result @staticmethod def get_episodes_success_counts(experiment): \"\"\"For a given", "path[1] + '*.log') regexp = r\"seed=(\\d+)-\" elif option == 'checkpoint': path = DataReader.get_experiments_mapping()[experiment]", "'rb') as f: images.append(f.read()) return images @staticmethod def get_last_gradient(experiment, seed, checkpoint, episode): \"\"\"Get", "for a given model evaluation on a given episode\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name", "result @staticmethod def get_episodes_with_outcome(experiment, seed, step, outcome): \"\"\"Gets episodes with given outcome for", "checkpints is the number of checkpoints. 
\"\"\" seeds = DataReader.find_option_values('seed', experiment) result =", "'episode': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/videos_simulator/' + path[1] + f'-seed={seed}-novaluestep{checkpoint}.model/ep*')", "all checkpoints for given experiment and seed. experiment and seed have to be", "regex = path[0] + 'policy_networks/' + path[1] + \\ f'-seed={seed}-novalue' + '.log' paths", "'.log' paths = glob(regex) assert len(paths) == 1, \\ f'paths for {regex} is", "f'paths for {regex} is not length of 1, and is equal to {paths}'", "equal to {paths}' return paths[0] @staticmethod @lru_cache(maxsize=100) def find_option_values(option, experiment=None, seed=None, checkpoint=None): \"\"\"Returns", "result = [] for line in lines: match = regex.match(line) if match: if", "{costs_paths} and it\\'s length is not 1') return None else: raw_costs = torch.load(costs_paths[0])", "1, \\ f'paths for {regex} is not length of 1, and is equal", "if option == 'checkpoint' - returns all checkpoints for given experiment and seed.", "seed): \"\"\"Gets the training and validation total losses for a given experiment and", "max value of the gradient, x, y are the location of this max", "Returns an array of data frames with all the costs for given evaluation", "seed) if len(steps) < len(checkpoints): steps = checkpoints for checkpoint in checkpoints: success", "costs_paths = glob(regex) if len(costs_paths) == 0: print( f'costs_paths for {regex} is {costs_paths}", "= DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images = sorted(glob(gradient_paths)) if len(images)", "+ '.log' paths = glob(regex) assert len(paths) == 1, \\ f'paths for {regex}", "passed. 
if option == 'checkpoint' - returns all checkpoints for given experiment and", "train_losses = [] validation_losses = [] for line in lines: match = regex.match(line)", "x = mx_index // image.shape[1] x -= middle_x y = mx_index % image.shape[1]", "costs for given model for all episodes\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0]", "= {} validation = {} for seed in seeds: result[seed] = [] curves", "array of states for given model for all episodes\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex", "return steps, result else: return None, None @staticmethod def get_learning_curves_for_seed(experiment, seed): \"\"\"Gets the", "561 class DataReader: \"\"\"Container class for the static data access methods\"\"\" EXPERIMENTS_MAPPING_FILE =", "min_length = min(min_length, len(result[seed])) max_length = max(max_length, len(result[seed])) if len(result) > 0: result", "+ \\ f'-seed={seed}-novaluestep{checkpoint}' + '.model.states' states_paths = glob(regex) assert len(states_paths) == 1, \\", "regexp = r'model/ep(\\d+)' values = [] for log in logs: m = re.search(regexp,", "as f: lines = f.readlines() regex = re.compile(\".*ep:\\s+(\\d+).*\\|\\ssuccess:\\s+(\\d).*\") result = [] for line", "= np.zeros(EPISODES) for seed in seeds: checkpoints = DataReader.find_option_values( 'checkpoint', experiment, seed) for", "= re.search(regexp, log) if m: result = m.group(1) values.append(int(result)) else: print(f'{log} doesn\\'t contain", "rate arrays for each seed for the given experiment across all checkpoints. 
The", "def get_episodes_success_counts(experiment): \"\"\"For a given experiment, for all episodes checks performance of all", "given evaluation \"\"\" costs = DataReader.get_model_costs(experiment, seed, checkpoint) if costs is not None:", "@staticmethod @lru_cache(maxsize=10) def get_model_speeds(experiment, seed, checkpoint): \"\"\" Returns an array of speeds for", "in enumerate(curves['steps']): train.setdefault(step, []).append(curves['train_losses'][i]) validation.setdefault(step, []).append(curves['validation_losses'][i]) train_means = [] train_stds = [] validation_means", "def get_episode_success_map(experiment, seed, step): \"\"\"Gets a 0-1 array of shape (episodes) where episodes", "path[1] + \\ f'-seed={seed}-novaluestep{checkpoint}' + '.model.states' states_paths = glob(regex) assert len(states_paths) == 1,", "@lru_cache(maxsize=10) def get_model_states(experiment, seed, checkpoint): \"\"\" Returns an array of states for given", "return costs[episode - 1] else: return None @staticmethod @lru_cache(maxsize=10) def get_model_costs(experiment, seed, checkpoint):", "seeds values = list(set(values)) values.sort() return values @staticmethod def get_success_rate(experiment, seed, step): \"\"\"get", "if len(images) == 0: return (0, 0, 0) image_path = sorted(glob(gradient_paths))[-1] image =", "checks performance of all the models with all possible seeds and checkpoints, and", "values = list(set(values)) values.sort() return values @staticmethod def get_success_rate(experiment, seed, step): \"\"\"get the", "glob(path[0] + 'planning_results/videos_simulator/' + path[1] + f'-seed={seed}-novaluestep{checkpoint}.model/ep*') regexp = r'model/ep(\\d+)' values = []", "to access the data about experiments. 
This includes reading logs to parse success", "= imageio.imread(image_path) mx_index = np.argmax(image) value = image.flatten()[mx_index] middle_x = image.shape[0] / 2", "= image.flatten()[mx_index] middle_x = image.shape[0] / 2 middle_y = image.shape[1] / 2 x", "result = {} steps = [] min_length = 100 max_length = 0 train", "sorted(glob(gradient_paths)) if len(images) == 0: return (0, 0, 0) image_path = sorted(glob(gradient_paths))[-1] image", "- 1 result = np.zeros(EPISODES) result[successes] = 1 return result @staticmethod def get_episodes_success_counts(experiment):", "path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/' + path[1] + f'-seed={seed}' +", "length of 1, and is equal to {paths}' return paths[0] @staticmethod def get_training_log_file(experiment,", "= max(max_length, len(result[seed])) if len(result) > 0: result = np.stack([np.pad(np.array(result[seed]), (0, max_length -", "If outcome == 1, returns successful episodes, if outcome == 0, returns failing", "in seeds: result[seed] = [] curves = DataReader.get_learning_curves_for_seed(experiment, seed) for i, step in", "= torch.stack(episode_states) result.append(episode_states[:, 2:].norm(dim=1)) # is it correct return result @staticmethod @lru_cache(maxsize=10) def", "path = DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{checkpoint}'", "= DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] image_paths = f'{path}/planning_results/videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/ego/*.png' images = [] for image_path", "and seed. experiment and seed have to be passed. 
if option == 'episode'", "if len(steps) < len(checkpoints): steps = checkpoints for checkpoint in checkpoints: success =", "= glob(path[0] + 'planning_results/' + path[1] + '*.log') regexp = r\"seed=(\\d+)-\" elif option", "cases, reading images, costs and speed. \"\"\" import numpy as np from glob", "one_hot = np.sum(one_hot, axis=0), one_hot = np.squeeze(one_hot) result += one_hot return result @staticmethod", "for given model experiment, seed, and checkpoint have to be passed. \"\"\" if", "@lru_cache(maxsize=1) def get_experiments_mapping(): \"\"\"Reads the experiments mapping from a json file EXPERIMENTS_MAPPING_FILE \"\"\"", "\\ f'states_paths for {regex} is {states_paths} and it\\'s length is not 1' states_path", "re.compile(\".*step\\s(\\d+).*\\s\\[.*\\π\\:\\s(.*)\\].*\\[.*\\π\\:\\s(.*)\\]\") steps = [] train_losses = [] validation_losses = [] for line in", "of states for given model for all episodes\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex =", "frames with all the costs for given evaluation \"\"\" costs = DataReader.get_model_costs(experiment, seed,", "+ \\ f'-seed={seed}-novaluestep{checkpoint}' + '.model.costs' costs_paths = glob(regex) if len(costs_paths) == 0: print(", "not 1' states_path = states_paths[0] states = torch.load(states_path) result = [] for i", "import json from functools import lru_cache import imageio EPISODES = 561 class DataReader:", "len(costs_paths) == 0: print( f'costs_paths for {regex} is {costs_paths} and it\\'s length is", "Returns an array of costs for given model for all episodes\"\"\" path =", "seed, step) with open(log_file, 'r') as f: last_line = f.readlines()[-1] last_colon = last_line.rfind(':')", "DataReader.get_learning_curves_for_seed(experiment, seed) for i, step in enumerate(curves['steps']): train.setdefault(step, []).append(curves['train_losses'][i]) validation.setdefault(step, []).append(curves['validation_losses'][i]) train_means =", "image_path = sorted(glob(gradient_paths))[-1] image = 
imageio.imread(image_path) mx_index = np.argmax(image) value = image.flatten()[mx_index] middle_x", "model. If outcome == 1, returns successful episodes, if outcome == 0, returns", "with all possible seeds and checkpoints, and returns an array of shape (episodes)", "data frames with all the costs for given evaluation \"\"\" costs = DataReader.get_model_costs(experiment,", "= f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images = [] for image_path in sorted(glob(gradient_paths)): with open(image_path, 'rb') as", "DataReader.get_experiments_mapping()[experiment][1] image_paths = f'{path}/planning_results/videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/ego/*.png' images = [] for image_path in sorted(glob(image_paths)): with open(image_path,", "= DataReader.get_model_costs(experiment, seed, checkpoint) if costs is not None: return costs[episode - 1]", "i, step in enumerate(curves['steps']): train.setdefault(step, []).append(curves['train_losses'][i]) validation.setdefault(step, []).append(curves['validation_losses'][i]) train_means = [] train_stds =", "= success - 1 one_hot = np.zeros((len(success), EPISODES)) one_hot[np.arange(len(success)), success] = 1 one_hot", "experiments mapping from a json file EXPERIMENTS_MAPPING_FILE \"\"\" with open(DataReader.EXPERIMENTS_MAPPING_FILE, 'r') as f:", "raw_costs] return costs @staticmethod @lru_cache(maxsize=10) def get_model_speeds(experiment, seed, checkpoint): \"\"\" Returns an array", "= f.readlines() regex = re.compile(\".*ep:\\s+(\\d+).*\\|\\ssuccess:\\s+(\\d).*\") result = [] for line in lines: match", "'r') as f: last_line = f.readlines()[-1] last_colon = last_line.rfind(':') success_rate = float(last_line[(last_colon +", "cost in raw_costs] return costs @staticmethod @lru_cache(maxsize=10) def get_model_speeds(experiment, seed, checkpoint): \"\"\" Returns", "== 1, returns successful episodes, if outcome == 0, 
returns failing episodes. \"\"\"", "np.stack([np.pad(np.array(result[seed]), (0, max_length - len(result[seed])), 'edge') for seed in result]) steps = np.array(steps)", "result = np.zeros(EPISODES) for seed in seeds: checkpoints = DataReader.find_option_values( 'checkpoint', experiment, seed)", "0 train = {} validation = {} for seed in seeds: result[seed] =", "option. Depending on option, returns: if option == 'seed' - returns all seeds", "given model\"\"\" log_file = DataReader.get_evaluation_log_file(experiment, seed, step) with open(log_file, 'r') as f: last_line", "path[1] + \\ f'-seed={seed}-novalue' + '.log' paths = glob(regex) assert len(paths) == 1,", "# log files for each step are generated for seeds values = list(set(values))", "step): \"\"\"Gets a 0-1 array of shape (episodes) where episodes is the number", "get_model_costs(experiment, seed, checkpoint): \"\"\" Returns an array of costs for given model for", "1 one_hot = np.sum(one_hot, axis=0), one_hot = np.squeeze(one_hot) result += one_hot return result", "to be passed. 
\"\"\" if option == 'seed': path = DataReader.get_experiments_mapping()[experiment] logs =", "for seed in seeds: result[seed] = [] curves = DataReader.get_learning_curves_for_seed(experiment, seed) for i,", "get_episodes_with_outcome(experiment, seed, step, outcome): \"\"\"Gets episodes with given outcome for a given model.", "for seed in result]) steps = np.array(steps) return steps, result else: return None,", "outcome == 1, returns successful episodes, if outcome == 0, returns failing episodes.", "seed) with open(path, 'r') as f: lines = f.readlines() regex = re.compile(\".*step\\s(\\d+).*\\s\\[.*\\π\\:\\s(.*)\\].*\\[.*\\π\\:\\s(.*)\\]\") steps", "x: x[-1], episode_states)) episode_states = torch.stack(episode_states) result.append(episode_states[:, 2:].norm(dim=1)) # is it correct return", "to {paths}' return paths[0] @staticmethod def get_training_log_file(experiment, seed): \"\"\"Retuns a path to the", "min_length = 100 max_length = 0 for seed in seeds: result[seed] = []", "(value, x, y) @staticmethod def get_evaluation_log_file(experiment, seed, step): \"\"\"Retuns a path to the", "used to access the data about experiments. This includes reading logs to parse", "import numpy as np from glob import glob import torch import pandas import", "all seeds for given experiment. experiment has to passed. if option == 'checkpoint'", "result @staticmethod def get_episode_speeds(experiment, seed, checkpoint, episode): \"\"\" Returns an array of speeds", "experiment across all checkpoints. 
The resulting shape of the np array is (seeds,", "as f: images.append(f.read()) return images @staticmethod def get_last_gradient(experiment, seed, checkpoint, episode): \"\"\"Get the", "result = [] for i in range(len(states)): episode_states = states[i] episode_states = list(map(lambda", "{paths}' return paths[0] @staticmethod def get_training_log_file(experiment, seed): \"\"\"Retuns a path to the eval", "if option == 'episode' - returns all episodes for given model experiment, seed,", "image_path in sorted(glob(gradient_paths)): with open(image_path, 'rb') as f: images.append(f.read()) return images @staticmethod def", "np.array(success) success = success - 1 one_hot = np.zeros((len(success), EPISODES)) one_hot[np.arange(len(success)), success] =", "in checkpoints: success = DataReader.get_episodes_with_outcome(experiment, seed, checkpoint, 1) success = np.array(success) success =", "\"\"\" costs = DataReader.get_model_costs(experiment, seed, checkpoint) if costs is not None: return costs[episode", "episode\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images = []", "result.append(episode_states[:, 2:].norm(dim=1)) # is it correct return result @staticmethod @lru_cache(maxsize=10) def get_model_states(experiment, seed,", "return None, None @staticmethod def get_learning_curves_for_seed(experiment, seed): \"\"\"Gets the training and validation total", "path[0] + 'policy_networks/' + path[1] + \\ f'-seed={seed}-novalue' + '.log' paths = glob(regex)", "checkpoint): \"\"\" Returns an array of costs for given model for all episodes\"\"\"", "failing episodes. 
\"\"\" path = DataReader.get_evaluation_log_file(experiment, seed, step) with open(path, 'r') as f:", "as f: last_line = f.readlines()[-1] last_colon = last_line.rfind(':') success_rate = float(last_line[(last_colon + 2):])", "= sorted(glob(gradient_paths)) if len(images) == 0: return (0, 0, 0) image_path = sorted(glob(gradient_paths))[-1]", "step) with open(path, 'r') as f: lines = f.readlines() regex = re.compile(\".*ep:\\s+(\\d+).*\\|\\ssuccess:\\s+(\\d).*\") result", "model for all episodes\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'planning_results/' +", "of shape (episodes) where episodes is the number of episodes, where Ith value", "success_rate @staticmethod def get_success_rates_for_experiment(experiment): \"\"\"get success rate arrays for each seed for the", "possible values for selected option. Depending on option, returns: if option == 'seed'", "[] train_stds = [] validation_means = [] validation_stds = [] for key in", "1, returns successful episodes, if outcome == 0, returns failing episodes. \"\"\" path", "cost.tolist()) for cost in raw_costs] return costs @staticmethod @lru_cache(maxsize=10) def get_model_speeds(experiment, seed, checkpoint):", "@staticmethod @lru_cache(maxsize=10) def get_model_costs(experiment, seed, checkpoint): \"\"\" Returns an array of costs for", "an array of costs for given model for all episodes\"\"\" path = DataReader.get_experiments_mapping()[experiment]", "[] min_length = 100 max_length = 0 train = {} validation = {}", "shape of the np array is (seeds, checkpoints), where seeds is the number", "the gradient, x, y are the location of this max value in the", "== 0, returns failing episodes. 
\"\"\" path = DataReader.get_evaluation_log_file(experiment, seed, step) with open(path,", "is not 1' states_path = states_paths[0] states = torch.load(states_path) result = [] for", "seed, checkpoint) if costs is not None: return costs[episode - 1] else: return", "DataReader.get_episodes_with_outcome(experiment, seed, checkpoint, 1) success = np.array(success) success = success - 1 one_hot", "np.squeeze(one_hot) result += one_hot return result @staticmethod def get_episode_speeds(experiment, seed, checkpoint, episode): \"\"\"", "get_learning_curves_for_seed(experiment, seed): \"\"\"Gets the training and validation total losses for a given experiment", "episodes, where Ith value is the number of models in this experiment that", "f'-seed={seed}-novaluestep{step}' + '.model.log' paths = glob(regex) assert len(paths) == 1, \\ f'paths for", "seeds and checkpoints, and returns an array of shape (episodes) where episodes is", "one_hot[np.arange(len(success)), success] = 1 one_hot = np.sum(one_hot, axis=0), one_hot = np.squeeze(one_hot) result +=", "episode\"\"\" return DataReader.get_model_speeds(experiment, seed, checkpoint)[episode - 1] @staticmethod def get_episode_costs(experiment, seed, checkpoint, episode):", "\\ f'-seed={seed}-novalue' + '.log' paths = glob(regex) assert len(paths) == 1, \\ f'paths", "+ '.model.states' states_paths = glob(regex) assert len(states_paths) == 1, \\ f'states_paths for {regex}", "reading logs to parse success cases, reading images, costs and speed. 
\"\"\" import", "train.setdefault(step, []).append(curves['train_losses'][i]) validation.setdefault(step, []).append(curves['validation_losses'][i]) train_means = [] train_stds = [] validation_means = []", "f: lines = f.readlines() regex = re.compile(\".*ep:\\s+(\\d+).*\\|\\ssuccess:\\s+(\\d).*\") result = [] for line in", "shape (episodes) where episodes is the number of episodes, where Ith value is", "validation_stds = [] for key in train: train_means.append(float(np.mean(train[key]))) train_stds.append(float(np.std(train[key]))) validation_means.append(float(np.mean(validation[key]))) validation_stds.append(float(np.std(validation[key]))) result =", "images = sorted(glob(gradient_paths)) if len(images) == 0: return (0, 0, 0) image_path =", "of the gradient, x, y are the location of this max value in", "episode): \"\"\" Returns an array of data frames with all the costs for", "success cases, reading images, costs and speed. \"\"\" import numpy as np from", "speeds for given model for all episodes\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0]", "checkpoint, episode): \"\"\"Get the last gradient for the model and episode Returns: (value,", "regex = re.compile(\".*ep:\\s+(\\d+).*\\|\\ssuccess:\\s+(\\d).*\") result = [] for line in lines: match = regex.match(line)", "1, and is equal to {paths}' return paths[0] @staticmethod @lru_cache(maxsize=100) def find_option_values(option, experiment=None,", "'rb') as f: images.append(f.read()) return images @staticmethod def get_gradients(experiment, seed, checkpoint, episode): \"\"\"Get", "it\\'s length is not 1' states_path = states_paths[0] states = torch.load(states_path) result =", "costs and speed. 
\"\"\" import numpy as np from glob import glob import", "all the models with all possible seeds and checkpoints, and returns an array", "seed in seeds: checkpoints = DataReader.find_option_values( 'checkpoint', experiment, seed) for checkpoint in checkpoints:", "= dict( steps=list(train.keys()), train=(train_means, train_stds), validation=(validation_means, validation_stds), ) return result @staticmethod def get_episodes_with_outcome(experiment,", "np.zeros(EPISODES) for seed in seeds: checkpoints = DataReader.find_option_values( 'checkpoint', experiment, seed) for checkpoint", "on a given episode\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png'", ") return result @staticmethod def get_learning_curves_for_experiment(experiment): seeds = DataReader.find_option_values('seed', experiment) result = {}", "+ '.model.log' paths = glob(regex) assert len(paths) == 1, \\ f'paths for {regex}", "= path[0] + 'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{checkpoint}' + '.model.costs' costs_paths =", "a given episode\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] image_paths = f'{path}/planning_results/videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/ego/*.png' images", "= json.load(f) return x @staticmethod def get_images(experiment, seed, checkpoint, episode): \"\"\"Get simulator images", "log in logs: m = re.search(regexp, log) if m: result = m.group(1) values.append(int(result))", "tuple, where value is the max value of the gradient, x, y are", "train=(train_means, train_stds), validation=(validation_means, validation_stds), ) return result @staticmethod def get_episodes_with_outcome(experiment, seed, step, outcome):", 
"selected option. Depending on option, returns: if option == 'seed' - returns all", "training and validation total losses for a given experiment and seed. \"\"\" path", "x, y) - tuple, where value is the max value of the gradient,", "import glob import torch import pandas import re import json from functools import", "checkpoint) if costs is not None: return costs[episode - 1] else: return None", "not None: return costs[episode - 1] else: return None @staticmethod @lru_cache(maxsize=10) def get_model_costs(experiment,", "'planning_results/' + path[1] + '*.log') regexp = r\"seed=(\\d+)-\" elif option == 'checkpoint': path", "seed, checkpoint, episode): \"\"\"Get the last gradient for the model and episode Returns:", "the eval logs for given model\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0] +", "models with all possible seeds and checkpoints, and returns an array of shape", "middle_y = image.shape[1] / 2 x = mx_index // image.shape[1] x -= middle_x", "middle_x y = mx_index % image.shape[1] y -= middle_y if value == 0:", "np.array(steps) return steps, result else: return None, None @staticmethod def get_learning_curves_for_seed(experiment, seed): \"\"\"Gets", "= dict( steps=steps, train_losses=train_losses, validation_losses=validation_losses, ) return result @staticmethod def get_learning_curves_for_experiment(experiment): seeds =", "= re.compile(\".*ep:\\s+(\\d+).*\\|\\ssuccess:\\s+(\\d).*\") result = [] for line in lines: match = regex.match(line) if", "logs = glob(path[0] + 'planning_results/' + path[1] + '*.log') regexp = r\"seed=(\\d+)-\" elif", "steps=list(train.keys()), train=(train_means, train_stds), validation=(validation_means, validation_stds), ) return result @staticmethod def get_episodes_with_outcome(experiment, seed, step,", "= DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/videos_simulator/' + path[1] + f'-seed={seed}-novaluestep{checkpoint}.model/ep*') regexp =", 
"train_losses.append(float(match.group(2))) validation_losses.append(float(match.group(3))) result = dict( steps=steps, train_losses=train_losses, validation_losses=validation_losses, ) return result @staticmethod def", "+ path[1] + \\ f'-seed={seed}-novalue' + '.log' paths = glob(regex) assert len(paths) ==", "seed=None, checkpoint=None): \"\"\"Returns possible values for selected option. Depending on option, returns: if", "min(min_length, len(result[seed])) max_length = max(max_length, len(result[seed])) if len(result) > 0: result = np.stack([np.pad(np.array(result[seed]),", "return None else: raw_costs = torch.load(costs_paths[0]) # list of DataFrame, one per episode", "[] train_losses = [] validation_losses = [] for line in lines: match =", "else: return None @staticmethod @lru_cache(maxsize=10) def get_model_costs(experiment, seed, checkpoint): \"\"\" Returns an array", "array of data frames with all the costs for given evaluation \"\"\" costs", "get_episode_costs(experiment, seed, checkpoint, episode): \"\"\" Returns an array of data frames with all", "- 1] else: return None @staticmethod @lru_cache(maxsize=10) def get_model_costs(experiment, seed, checkpoint): \"\"\" Returns", "- 1] @staticmethod def get_episode_costs(experiment, seed, checkpoint, episode): \"\"\" Returns an array of", "This includes reading logs to parse success cases, reading images, costs and speed.", "enumerate(curves['steps']): train.setdefault(step, []).append(curves['train_losses'][i]) validation.setdefault(step, []).append(curves['validation_losses'][i]) train_means = [] train_stds = [] validation_means =", "return costs @staticmethod @lru_cache(maxsize=10) def get_model_speeds(experiment, seed, checkpoint): \"\"\" Returns an array of", "else: raw_costs = torch.load(costs_paths[0]) # list of DataFrame, one per episode costs =", "performance of all the models with all possible seeds and checkpoints, and returns", "arrays for each seed for the given experiment across all checkpoints. 
The resulting", "episode. \"\"\" seeds = DataReader.find_option_values('seed', experiment) result = np.zeros(EPISODES) for seed in seeds:", "episodes with given outcome for a given model. If outcome == 1, returns", "and 1 otherwise. \"\"\" successes = DataReader.get_episodes_with_outcome(experiment, seed, step, 1) successes = np.array(successes)", "otherwise. \"\"\" successes = DataReader.get_episodes_with_outcome(experiment, seed, step, 1) successes = np.array(successes) - 1", "'checkpoint', experiment, seed) for checkpoint in checkpoints: success = DataReader.get_episodes_with_outcome(experiment, seed, checkpoint, 1)", "\\ f'-seed={seed}-novaluestep{step}' + '.model.log' paths = glob(regex) assert len(paths) == 1, \\ f'paths", "\"\"\"Reads the experiments mapping from a json file EXPERIMENTS_MAPPING_FILE \"\"\" with open(DataReader.EXPERIMENTS_MAPPING_FILE, 'r')", "an array of data frames with all the costs for given evaluation \"\"\"", "successes = np.array(successes) - 1 result = np.zeros(EPISODES) result[successes] = 1 return result", "path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/videos_simulator/' + path[1] + f'-seed={seed}-novaluestep{checkpoint}.model/ep*') regexp", "for the given experiment across all checkpoints. 
The resulting shape of the np", "'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{checkpoint}' + '.model.states' states_paths = glob(regex) assert len(states_paths)", "in sorted(glob(image_paths)): with open(image_path, 'rb') as f: images.append(f.read()) return images @staticmethod def get_gradients(experiment,", "[] validation_stds = [] for key in train: train_means.append(float(np.mean(train[key]))) train_stds.append(float(np.std(train[key]))) validation_means.append(float(np.mean(validation[key]))) validation_stds.append(float(np.std(validation[key]))) result", "is the number of models in this experiment that succeeded in this episode.", "gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images = [] for image_path in sorted(glob(gradient_paths)): with open(image_path, 'rb')", "validation_means.append(float(np.mean(validation[key]))) validation_stds.append(float(np.std(validation[key]))) result = dict( steps=list(train.keys()), train=(train_means, train_stds), validation=(validation_means, validation_stds), ) return result", "= path[0] + 'policy_networks/' + path[1] + \\ f'-seed={seed}-novalue' + '.log' paths =", "in checkpoints: success = DataReader.get_success_rate( experiment, seed, checkpoint) result[seed].append(success) min_length = min(min_length, len(result[seed]))", "of checkpoints. 
\"\"\" seeds = DataReader.find_option_values('seed', experiment) result = {} steps = []", "model evaluation on a given episode\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] gradient_paths", "experiment) result = np.zeros(EPISODES) for seed in seeds: checkpoints = DataReader.find_option_values( 'checkpoint', experiment,", "episodes checks performance of all the models with all possible seeds and checkpoints,", "seed in seeds: result[seed] = [] curves = DataReader.get_learning_curves_for_seed(experiment, seed) for i, step", "+ path[1] + \\ f'-seed={seed}-novaluestep{checkpoint}' + '.model.states' states_paths = glob(regex) assert len(states_paths) ==", "evaluation on a given episode\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] image_paths =", "def get_model_speeds(experiment, seed, checkpoint): \"\"\" Returns an array of speeds for given model", "0: return (0, 0, 0) image_path = sorted(glob(gradient_paths))[-1] image = imageio.imread(image_path) mx_index =", "dict( steps=list(train.keys()), train=(train_means, train_stds), validation=(validation_means, validation_stds), ) return result @staticmethod def get_episodes_with_outcome(experiment, seed,", "\\ f'-seed={seed}-novaluestep{checkpoint}' + '.model.costs' costs_paths = glob(regex) if len(costs_paths) == 0: print( f'costs_paths", "for a given model. If outcome == 1, returns successful episodes, if outcome", "this experiment that succeeded in this episode. 
\"\"\" seeds = DataReader.find_option_values('seed', experiment) result", "costs @staticmethod @lru_cache(maxsize=10) def get_model_speeds(experiment, seed, checkpoint): \"\"\" Returns an array of speeds", "= min(min_length, len(result[seed])) max_length = max(max_length, len(result[seed])) if len(result) > 0: result =", "\"\"\"For a given experiment, for all episodes checks performance of all the models", "re import json from functools import lru_cache import imageio EPISODES = 561 class", "None @staticmethod @lru_cache(maxsize=10) def get_model_costs(experiment, seed, checkpoint): \"\"\" Returns an array of costs", "f'states_paths for {regex} is {states_paths} and it\\'s length is not 1' states_path =", "@staticmethod def get_gradients(experiment, seed, checkpoint, episode): \"\"\"Get gradients for a given model evaluation", "= {} steps = [] min_length = 100 max_length = 0 train =", "option, returns: if option == 'seed' - returns all seeds for given experiment.", "np.array(successes) - 1 result = np.zeros(EPISODES) result[successes] = 1 return result @staticmethod def", "episode): \"\"\"Get simulator images for a given model evaluation on a given episode\"\"\"", "+ 'policy_networks/' + path[1] + \\ f'-seed={seed}-novalue' + '.log' paths = glob(regex) assert", "'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{step}' + '.model.log' paths = glob(regex) assert len(paths)", "array of shape (episodes) where episodes is the number of episodes, where Ith", "for the static data access methods\"\"\" EXPERIMENTS_MAPPING_FILE = 'experiments_mapping.json' @staticmethod @lru_cache(maxsize=1) def get_experiments_mapping():", "logs = glob(path[0] + 'planning_results/' + path[1] + f'-seed={seed}' + '*.model.log') regexp =", "option == 'checkpoint': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/' + path[1]", "regex.match(line) if match: if int(match.group(2)) == outcome: result.append(int(match.group(1))) return result 
@staticmethod def get_episode_success_map(experiment,", "DataReader.find_option_values( 'checkpoint', experiment, seed) for checkpoint in checkpoints: success = DataReader.get_episodes_with_outcome(experiment, seed, checkpoint,", "given experiment and seed. \"\"\" path = DataReader.get_training_log_file(experiment, seed) with open(path, 'r') as", "images, costs and speed. \"\"\" import numpy as np from glob import glob", "{paths}' return paths[0] @staticmethod @lru_cache(maxsize=100) def find_option_values(option, experiment=None, seed=None, checkpoint=None): \"\"\"Returns possible values", "[] for line in lines: match = regex.match(line) if match: if int(match.group(2)) ==", "\"\"\"Returns possible values for selected option. Depending on option, returns: if option ==", "= regex.match(line) if match: steps.append(int(match.group(1))) train_losses.append(float(match.group(2))) validation_losses.append(float(match.group(3))) result = dict( steps=steps, train_losses=train_losses, validation_losses=validation_losses,", "== 'checkpoint': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/' + path[1] +", "1 one_hot = np.zeros((len(success), EPISODES)) one_hot[np.arange(len(success)), success] = 1 one_hot = np.sum(one_hot, axis=0),", "get_images(experiment, seed, checkpoint, episode): \"\"\"Get simulator images for a given model evaluation on", "result @staticmethod def get_learning_curves_for_experiment(experiment): seeds = DataReader.find_option_values('seed', experiment) result = {} steps =", "the number of checkpoints. 
\"\"\" seeds = DataReader.find_option_values('seed', experiment) result = {} steps", "and is equal to {paths}' return paths[0] @staticmethod def get_training_log_file(experiment, seed): \"\"\"Retuns a", "print(f'{log} doesn\\'t contain {option}') # log files for each step are generated for", "= DataReader.get_evaluation_log_file(experiment, seed, step) with open(log_file, 'r') as f: last_line = f.readlines()[-1] last_colon", "given experiment, for all episodes checks performance of all the models with all", "DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] image_paths = f'{path}/planning_results/videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/ego/*.png' images = [] for image_path in", "= 0 train = {} validation = {} for seed in seeds: result[seed]", "json from functools import lru_cache import imageio EPISODES = 561 class DataReader: \"\"\"Container", "image = imageio.imread(image_path) mx_index = np.argmax(image) value = image.flatten()[mx_index] middle_x = image.shape[0] /", "= np.argmax(image) value = image.flatten()[mx_index] middle_x = image.shape[0] / 2 middle_y = image.shape[1]", "imageio EPISODES = 561 class DataReader: \"\"\"Container class for the static data access", "number of checkpoints. 
\"\"\" seeds = DataReader.find_option_values('seed', experiment) result = {} steps =", "checkpoint, episode): \"\"\"Get gradients for a given model evaluation on a given episode\"\"\"", "= DataReader.get_experiments_mapping()[experiment][1] gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images = sorted(glob(gradient_paths)) if len(images) == 0: return", "\"\"\" path = DataReader.get_evaluation_log_file(experiment, seed, step) with open(path, 'r') as f: lines =", "return (0, 0, 0) else: return (value, x, y) @staticmethod def get_evaluation_log_file(experiment, seed,", "re.search(regexp, log) if m: result = m.group(1) values.append(int(result)) else: print(f'{log} doesn\\'t contain {option}')", "if match: steps.append(int(match.group(1))) train_losses.append(float(match.group(2))) validation_losses.append(float(match.group(3))) result = dict( steps=steps, train_losses=train_losses, validation_losses=validation_losses, ) return", "length is not 1') return None else: raw_costs = torch.load(costs_paths[0]) # list of", "get_gradients(experiment, seed, checkpoint, episode): \"\"\"Get gradients for a given model evaluation on a", "all checkpoints. The resulting shape of the np array is (seeds, checkpoints), where", "float(last_line[(last_colon + 2):]) return success_rate @staticmethod def get_success_rates_for_experiment(experiment): \"\"\"get success rate arrays for", "array of shape (episodes) where episodes is the number of episodes. 
Ith value", "f'-seed={seed}-novaluestep{checkpoint}.model/ep*') regexp = r'model/ep(\\d+)' values = [] for log in logs: m =", "all episodes\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'planning_results/' + path[1] +", "where value is the max value of the gradient, x, y are the", "= DataReader.find_option_values('seed', experiment) result = np.zeros(EPISODES) for seed in seeds: checkpoints = DataReader.find_option_values(", "x[-1], episode_states)) episode_states = torch.stack(episode_states) result.append(episode_states[:, 2:].norm(dim=1)) # is it correct return result", "\"\"\"Gets episodes with given outcome for a given model. If outcome == 1,", "checkpoint, 1) success = np.array(success) success = success - 1 one_hot = np.zeros((len(success),", "steps = checkpoints for checkpoint in checkpoints: success = DataReader.get_success_rate( experiment, seed, checkpoint)", "get_success_rate(experiment, seed, step): \"\"\"get the success rate for a given model\"\"\" log_file =", "speeds for given model and given episode\"\"\" return DataReader.get_model_speeds(experiment, seed, checkpoint)[episode - 1]", "from a json file EXPERIMENTS_MAPPING_FILE \"\"\" with open(DataReader.EXPERIMENTS_MAPPING_FILE, 'r') as f: x =", "list(map(lambda x: x[-1], episode_states)) episode_states = torch.stack(episode_states) result.append(episode_states[:, 2:].norm(dim=1)) # is it correct", "== 1, \\ f'paths for {regex} is not length of 1, and is", "f: images.append(f.read()) return images @staticmethod def get_last_gradient(experiment, seed, checkpoint, episode): \"\"\"Get the last", "length is not 1' states_path = states_paths[0] states = torch.load(states_path) result = []", "= glob(regex) assert len(states_paths) == 1, \\ f'states_paths for {regex} is {states_paths} and", "DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] gradient_paths = 
f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images = sorted(glob(gradient_paths)) if len(images) ==", "% image.shape[1] y -= middle_y if value == 0: return (0, 0, 0)", "+ path[1] + f'-seed={seed}-novaluestep{checkpoint}.model/ep*') regexp = r'model/ep(\\d+)' values = [] for log in", "for i in range(len(states)): episode_states = states[i] episode_states = list(map(lambda x: x[-1], episode_states))", "= DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/' + path[1] + '*.log') regexp =", "= DataReader.get_training_log_file(experiment, seed) with open(path, 'r') as f: lines = f.readlines() regex =", "it\\'s length is not 1') return None else: raw_costs = torch.load(costs_paths[0]) # list", "as f: lines = f.readlines() regex = re.compile(\".*step\\s(\\d+).*\\s\\[.*\\π\\:\\s(.*)\\].*\\[.*\\π\\:\\s(.*)\\]\") steps = [] train_losses =", "get_training_log_file(experiment, seed): \"\"\"Retuns a path to the eval logs for given model\"\"\" path", "None: return costs[episode - 1] else: return None @staticmethod @lru_cache(maxsize=10) def get_model_costs(experiment, seed,", "@staticmethod def get_last_gradient(experiment, seed, checkpoint, episode): \"\"\"Get the last gradient for the model", "0 if the ith episode failed, and 1 otherwise. \"\"\" successes = DataReader.get_episodes_with_outcome(experiment,", "result += one_hot return result @staticmethod def get_episode_speeds(experiment, seed, checkpoint, episode): \"\"\" Returns", "with static methods which can be used to access the data about experiments.", "+ 'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{checkpoint}' + '.model.states' states_paths = glob(regex) assert", "option == 'episode' - returns all episodes for given model experiment, seed, and", "- returns all checkpoints for given experiment and seed. 
experiment and seed have", "// image.shape[1] x -= middle_x y = mx_index % image.shape[1] y -= middle_y", "seeds: checkpoints = DataReader.find_option_values( 'checkpoint', experiment, seed) for checkpoint in checkpoints: success =", "passed. \"\"\" if option == 'seed': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] +", "DataReader.get_model_costs(experiment, seed, checkpoint) if costs is not None: return costs[episode - 1] else:", "= [] checkpoints = DataReader.find_option_values( 'checkpoint', experiment, seed) if len(steps) < len(checkpoints): steps", "x @staticmethod def get_images(experiment, seed, checkpoint, episode): \"\"\"Get simulator images for a given", "+ path[1] + \\ f'-seed={seed}-novaluestep{checkpoint}' + '.model.costs' costs_paths = glob(regex) if len(costs_paths) ==", "the np array is (seeds, checkpoints), where seeds is the number of seeds,", "middle_x = image.shape[0] / 2 middle_y = image.shape[1] / 2 x = mx_index", "= 100 max_length = 0 for seed in seeds: result[seed] = [] checkpoints", "the result is 0 if the ith episode failed, and 1 otherwise. \"\"\"", "is equal to {paths}' return paths[0] @staticmethod @lru_cache(maxsize=100) def find_option_values(option, experiment=None, seed=None, checkpoint=None):", "success = DataReader.get_success_rate( experiment, seed, checkpoint) result[seed].append(success) min_length = min(min_length, len(result[seed])) max_length =", "{states_paths} and it\\'s length is not 1' states_path = states_paths[0] states = torch.load(states_path)", "for log in logs: m = re.search(regexp, log) if m: result = m.group(1)", "success rate for a given model\"\"\" log_file = DataReader.get_evaluation_log_file(experiment, seed, step) with open(log_file,", "seeds is the number of seeds, and checkpints is the number of checkpoints.", "be used to access the data about experiments. 
This includes reading logs to", "return result @staticmethod def get_episode_success_map(experiment, seed, step): \"\"\"Gets a 0-1 array of shape", "path[1] + f'-seed={seed}' + '*.model.log') regexp = r'-novaluestep(\\d+)\\.' elif option == 'episode': path", "= glob(regex) if len(costs_paths) == 0: print( f'costs_paths for {regex} is {costs_paths} and", "is the number of episodes, where Ith value is the number of models", "experiments. This includes reading logs to parse success cases, reading images, costs and", "0, 0) image_path = sorted(glob(gradient_paths))[-1] image = imageio.imread(image_path) mx_index = np.argmax(image) value =", "@staticmethod def get_success_rates_for_experiment(experiment): \"\"\"get success rate arrays for each seed for the given", "mx_index // image.shape[1] x -= middle_x y = mx_index % image.shape[1] y -=", "equal to {paths}' return paths[0] @staticmethod def get_training_log_file(experiment, seed): \"\"\"Retuns a path to", "== 0: return (0, 0, 0) image_path = sorted(glob(gradient_paths))[-1] image = imageio.imread(image_path) mx_index", "where Ith value is the number of models in this experiment that succeeded", "image.shape[0] / 2 middle_y = image.shape[1] / 2 x = mx_index // image.shape[1]", "for a given experiment and seed. 
\"\"\" path = DataReader.get_training_log_file(experiment, seed) with open(path,", "an array of speeds for given model for all episodes\"\"\" path = DataReader.get_experiments_mapping()[experiment]", "episode_states = states[i] episode_states = list(map(lambda x: x[-1], episode_states)) episode_states = torch.stack(episode_states) result.append(episode_states[:,", "\"\"\" seeds = DataReader.find_option_values('seed', experiment) result = {} steps = [] min_length =", "validation.setdefault(step, []).append(curves['validation_losses'][i]) train_means = [] train_stds = [] validation_means = [] validation_stds =", "logs for given model\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'policy_networks/' +", "path[0] + 'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{checkpoint}' + '.model.costs' costs_paths = glob(regex)", "= DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/' + path[1] + f'-seed={seed}' + '*.model.log')", "in lines: match = regex.match(line) if match: steps.append(int(match.group(1))) train_losses.append(float(match.group(2))) validation_losses.append(float(match.group(3))) result = dict(", "experiment=None, seed=None, checkpoint=None): \"\"\"Returns possible values for selected option. Depending on option, returns:", "else cost.tolist()) for cost in raw_costs] return costs @staticmethod @lru_cache(maxsize=10) def get_model_speeds(experiment, seed,", "of seeds, and checkpints is the number of checkpoints. \"\"\" seeds = DataReader.find_option_values('seed',", "train_stds), validation=(validation_means, validation_stds), ) return result @staticmethod def get_episodes_with_outcome(experiment, seed, step, outcome): \"\"\"Gets", "successes = DataReader.get_episodes_with_outcome(experiment, seed, step, 1) successes = np.array(successes) - 1 result =", "losses for a given experiment and seed. 
\"\"\" path = DataReader.get_training_log_file(experiment, seed) with", "'checkpoint': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/' + path[1] + f'-seed={seed}'", "step, outcome): \"\"\"Gets episodes with given outcome for a given model. If outcome", "'*.model.log') regexp = r'-novaluestep(\\d+)\\.' elif option == 'episode': path = DataReader.get_experiments_mapping()[experiment] logs =", "is {costs_paths} and it\\'s length is not 1') return None else: raw_costs =", "seed) for checkpoint in checkpoints: success = DataReader.get_episodes_with_outcome(experiment, seed, checkpoint, 1) success =", "from functools import lru_cache import imageio EPISODES = 561 class DataReader: \"\"\"Container class", "in train: train_means.append(float(np.mean(train[key]))) train_stds.append(float(np.std(train[key]))) validation_means.append(float(np.mean(validation[key]))) validation_stds.append(float(np.std(validation[key]))) result = dict( steps=list(train.keys()), train=(train_means, train_stds), validation=(validation_means,", "= DataReader.get_episodes_with_outcome(experiment, seed, step, 1) successes = np.array(successes) - 1 result = np.zeros(EPISODES)", "\"\"\" import numpy as np from glob import glob import torch import pandas", "seeds: result[seed] = [] curves = DataReader.get_learning_curves_for_seed(experiment, seed) for i, step in enumerate(curves['steps']):", "success rate arrays for each seed for the given experiment across all checkpoints.", "x, y) @staticmethod def get_evaluation_log_file(experiment, seed, step): \"\"\"Retuns a path to the eval", "= [pandas.DataFrame(cost if type(cost) == type([]) else cost.tolist()) for cost in raw_costs] return", "is (seeds, checkpoints), where seeds is the number of seeds, and checkpints is", "for checkpoint in checkpoints: success = DataReader.get_episodes_with_outcome(experiment, seed, checkpoint, 1) success = np.array(success)", "of DataFrame, one per episode costs = 
[pandas.DataFrame(cost if type(cost) == type([]) else", "[] for line in lines: match = regex.match(line) if match: steps.append(int(match.group(1))) train_losses.append(float(match.group(2))) validation_losses.append(float(match.group(3)))", "this episode. \"\"\" seeds = DataReader.find_option_values('seed', experiment) result = np.zeros(EPISODES) for seed in", "import re import json from functools import lru_cache import imageio EPISODES = 561", "100 max_length = 0 train = {} validation = {} for seed in", "access the data about experiments. This includes reading logs to parse success cases,", "if len(result) > 0: result = np.stack([np.pad(np.array(result[seed]), (0, max_length - len(result[seed])), 'edge') for", "raw_costs = torch.load(costs_paths[0]) # list of DataFrame, one per episode costs = [pandas.DataFrame(cost", "values @staticmethod def get_success_rate(experiment, seed, step): \"\"\"get the success rate for a given", "len(images) == 0: return (0, 0, 0) image_path = sorted(glob(gradient_paths))[-1] image = imageio.imread(image_path)", "checkpoint have to be passed. \"\"\" if option == 'seed': path = DataReader.get_experiments_mapping()[experiment]", "\"\"\" Returns an array of speeds for given model for all episodes\"\"\" path", "where seeds is the number of seeds, and checkpints is the number of", "given model\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'policy_networks/' + path[1] +", "= [] for line in lines: match = regex.match(line) if match: steps.append(int(match.group(1))) train_losses.append(float(match.group(2)))", "@staticmethod def get_images(experiment, seed, checkpoint, episode): \"\"\"Get simulator images for a given model", "option == 'checkpoint' - returns all checkpoints for given experiment and seed. 
experiment", "given model for all episodes\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'planning_results/'", "experiment, for all episodes checks performance of all the models with all possible", "seed, checkpoint): \"\"\" Returns an array of costs for given model for all", "== 0: print( f'costs_paths for {regex} is {costs_paths} and it\\'s length is not", "1] @staticmethod def get_episode_costs(experiment, seed, checkpoint, episode): \"\"\" Returns an array of data", "evaluation \"\"\" costs = DataReader.get_model_costs(experiment, seed, checkpoint) if costs is not None: return", "match = regex.match(line) if match: if int(match.group(2)) == outcome: result.append(int(match.group(1))) return result @staticmethod", "DataReader.get_training_log_file(experiment, seed) with open(path, 'r') as f: lines = f.readlines() regex = re.compile(\".*step\\s(\\d+).*\\s\\[.*\\π\\:\\s(.*)\\].*\\[.*\\π\\:\\s(.*)\\]\")", "successful episodes, if outcome == 0, returns failing episodes. \"\"\" path = DataReader.get_evaluation_log_file(experiment,", "path[1] + \\ f'-seed={seed}-novaluestep{step}' + '.model.log' paths = glob(regex) assert len(paths) == 1,", "f'-seed={seed}-novaluestep{checkpoint}' + '.model.states' states_paths = glob(regex) assert len(states_paths) == 1, \\ f'states_paths for", "r'model/ep(\\d+)' values = [] for log in logs: m = re.search(regexp, log) if", "of this max value in the gradient image. 
\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name", "possible seeds and checkpoints, and returns an array of shape (episodes) where episodes", "'r') as f: x = json.load(f) return x @staticmethod def get_images(experiment, seed, checkpoint,", "EPISODES)) one_hot[np.arange(len(success)), success] = 1 one_hot = np.sum(one_hot, axis=0), one_hot = np.squeeze(one_hot) result", "model\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'policy_networks/' + path[1] + \\", "= glob(regex) assert len(paths) == 1, \\ f'paths for {regex} is not length", "ith episode failed, and 1 otherwise. \"\"\" successes = DataReader.get_episodes_with_outcome(experiment, seed, step, 1)", "costs = [pandas.DataFrame(cost if type(cost) == type([]) else cost.tolist()) for cost in raw_costs]", "of shape (episodes) where episodes is the number of episodes. Ith value in", "passed. if option == 'episode' - returns all episodes for given model experiment,", "return (0, 0, 0) image_path = sorted(glob(gradient_paths))[-1] image = imageio.imread(image_path) mx_index = np.argmax(image)", "number of models in this experiment that succeeded in this episode. \"\"\" seeds", "checkpoint in checkpoints: success = DataReader.get_success_rate( experiment, seed, checkpoint) result[seed].append(success) min_length = min(min_length,", "all possible seeds and checkpoints, and returns an array of shape (episodes) where", "1 otherwise. \"\"\" successes = DataReader.get_episodes_with_outcome(experiment, seed, step, 1) successes = np.array(successes) -", "this max value in the gradient image. 
\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name =", "np.argmax(image) value = image.flatten()[mx_index] middle_x = image.shape[0] / 2 middle_y = image.shape[1] /", "states[i] episode_states = list(map(lambda x: x[-1], episode_states)) episode_states = torch.stack(episode_states) result.append(episode_states) return result", "= torch.load(costs_paths[0]) # list of DataFrame, one per episode costs = [pandas.DataFrame(cost if", "mx_index = np.argmax(image) value = image.flatten()[mx_index] middle_x = image.shape[0] / 2 middle_y =", "torch.stack(episode_states) result.append(episode_states[:, 2:].norm(dim=1)) # is it correct return result @staticmethod @lru_cache(maxsize=10) def get_model_states(experiment,", "regex.match(line) if match: steps.append(int(match.group(1))) train_losses.append(float(match.group(2))) validation_losses.append(float(match.group(3))) result = dict( steps=steps, train_losses=train_losses, validation_losses=validation_losses, )", "DataReader.find_option_values('seed', experiment) result = np.zeros(EPISODES) for seed in seeds: checkpoints = DataReader.find_option_values( 'checkpoint',", "DataReader.find_option_values('seed', experiment) result = {} steps = [] min_length = 100 max_length =", "1, \\ f'states_paths for {regex} is {states_paths} and it\\'s length is not 1'", "+ path[1] + f'-seed={seed}' + '*.model.log') regexp = r'-novaluestep(\\d+)\\.' 
elif option == 'episode':", "'episode' - returns all episodes for given model experiment, seed, and checkpoint have", "len(paths) == 1, \\ f'paths for {regex} is not length of 1, and", "\"\"\"Get gradients for a given model evaluation on a given episode\"\"\" path =", "def get_gradients(experiment, seed, checkpoint, episode): \"\"\"Get gradients for a given model evaluation on", "{} for seed in seeds: result[seed] = [] curves = DataReader.get_learning_curves_for_seed(experiment, seed) for", "= np.array(success) success = success - 1 one_hot = np.zeros((len(success), EPISODES)) one_hot[np.arange(len(success)), success]", "model_name = DataReader.get_experiments_mapping()[experiment][1] gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images = sorted(glob(gradient_paths)) if len(images) == 0:", "[] for image_path in sorted(glob(gradient_paths)): with open(image_path, 'rb') as f: images.append(f.read()) return images", "the max value of the gradient, x, y are the location of this", "for given evaluation \"\"\" costs = DataReader.get_model_costs(experiment, seed, checkpoint) if costs is not", "list(set(values)) values.sort() return values @staticmethod def get_success_rate(experiment, seed, step): \"\"\"get the success rate", "with open(image_path, 'rb') as f: images.append(f.read()) return images @staticmethod def get_last_gradient(experiment, seed, checkpoint,", "(seeds, checkpoints), where seeds is the number of seeds, and checkpints is the", "costs[episode - 1] else: return None @staticmethod @lru_cache(maxsize=10) def get_model_costs(experiment, seed, checkpoint): \"\"\"", "return result @staticmethod def get_learning_curves_for_experiment(experiment): seeds = DataReader.find_option_values('seed', experiment) result = {} steps", "DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{checkpoint}' + 
'.model.states'", "option == 'seed': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/' + path[1]", "outcome: result.append(int(match.group(1))) return result @staticmethod def get_episode_success_map(experiment, seed, step): \"\"\"Gets a 0-1 array", "= [] validation_stds = [] for key in train: train_means.append(float(np.mean(train[key]))) train_stds.append(float(np.std(train[key]))) validation_means.append(float(np.mean(validation[key]))) validation_stds.append(float(np.std(validation[key])))", "steps = [] min_length = 100 max_length = 0 for seed in seeds:", "is equal to {paths}' return paths[0] @staticmethod def get_training_log_file(experiment, seed): \"\"\"Retuns a path", "given model evaluation on a given episode\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1]", "def find_option_values(option, experiment=None, seed=None, checkpoint=None): \"\"\"Returns possible values for selected option. Depending on", "if int(match.group(2)) == outcome: result.append(int(match.group(1))) return result @staticmethod def get_episode_success_map(experiment, seed, step): \"\"\"Gets", "None, None @staticmethod def get_learning_curves_for_seed(experiment, seed): \"\"\"Gets the training and validation total losses", "[]).append(curves['validation_losses'][i]) train_means = [] train_stds = [] validation_means = [] validation_stds = []", "given episode\"\"\" return DataReader.get_model_speeds(experiment, seed, checkpoint)[episode - 1] @staticmethod def get_episode_costs(experiment, seed, checkpoint,", "validation_losses.append(float(match.group(3))) result = dict( steps=steps, train_losses=train_losses, validation_losses=validation_losses, ) return result @staticmethod def get_learning_curves_for_experiment(experiment):", "0, returns failing episodes. 
\"\"\" path = DataReader.get_evaluation_log_file(experiment, seed, step) with open(path, 'r')", "curves = DataReader.get_learning_curves_for_seed(experiment, seed) for i, step in enumerate(curves['steps']): train.setdefault(step, []).append(curves['train_losses'][i]) validation.setdefault(step, []).append(curves['validation_losses'][i])", "a json file EXPERIMENTS_MAPPING_FILE \"\"\" with open(DataReader.EXPERIMENTS_MAPPING_FILE, 'r') as f: x = json.load(f)", "outcome): \"\"\"Gets episodes with given outcome for a given model. If outcome ==", "step): \"\"\"Retuns a path to the eval logs for given model\"\"\" path =", "value is the number of models in this experiment that succeeded in this", "and it\\'s length is not 1' states_path = states_paths[0] states = torch.load(states_path) result", "/ 2 middle_y = image.shape[1] / 2 x = mx_index // image.shape[1] x", "DataReader.get_evaluation_log_file(experiment, seed, step) with open(path, 'r') as f: lines = f.readlines() regex =", "of episodes. 
Ith value in the result is 0 if the ith episode", "np array is (seeds, checkpoints), where seeds is the number of seeds, and", "gradient for the model and episode Returns: (value, x, y) - tuple, where", "- 1 one_hot = np.zeros((len(success), EPISODES)) one_hot[np.arange(len(success)), success] = 1 one_hot = np.sum(one_hot,", "seed): \"\"\"Retuns a path to the eval logs for given model\"\"\" path =", "array of speeds for given model for all episodes\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex", "for the model and episode Returns: (value, x, y) - tuple, where value", "\"\"\"get success rate arrays for each seed for the given experiment across all", "an array of shape (episodes) where episodes is the number of episodes, where", "success] = 1 one_hot = np.sum(one_hot, axis=0), one_hot = np.squeeze(one_hot) result += one_hot", "result[seed] = [] checkpoints = DataReader.find_option_values( 'checkpoint', experiment, seed) if len(steps) < len(checkpoints):", "of 1, and is equal to {paths}' return paths[0] @staticmethod @lru_cache(maxsize=100) def find_option_values(option,", "= mx_index // image.shape[1] x -= middle_x y = mx_index % image.shape[1] y", "'planning_results/videos_simulator/' + path[1] + f'-seed={seed}-novaluestep{checkpoint}.model/ep*') regexp = r'model/ep(\\d+)' values = [] for log", "result = dict( steps=steps, train_losses=train_losses, validation_losses=validation_losses, ) return result @staticmethod def get_learning_curves_for_experiment(experiment): seeds", "[] for i in range(len(states)): episode_states = states[i] episode_states = list(map(lambda x: x[-1],", "models in this experiment that succeeded in this episode. 
\"\"\" seeds = DataReader.find_option_values('seed',", "costs = DataReader.get_model_costs(experiment, seed, checkpoint) if costs is not None: return costs[episode -", "if type(cost) == type([]) else cost.tolist()) for cost in raw_costs] return costs @staticmethod", "'*.log') regexp = r\"seed=(\\d+)-\" elif option == 'checkpoint': path = DataReader.get_experiments_mapping()[experiment] logs =", "images.append(f.read()) return images @staticmethod def get_last_gradient(experiment, seed, checkpoint, episode): \"\"\"Get the last gradient", "min_length = 100 max_length = 0 train = {} validation = {} for", "== type([]) else cost.tolist()) for cost in raw_costs] return costs @staticmethod @lru_cache(maxsize=10) def", "given model and given episode\"\"\" return DataReader.get_model_speeds(experiment, seed, checkpoint)[episode - 1] @staticmethod def", "return paths[0] @staticmethod @lru_cache(maxsize=100) def find_option_values(option, experiment=None, seed=None, checkpoint=None): \"\"\"Returns possible values for", "with all the costs for given evaluation \"\"\" costs = DataReader.get_model_costs(experiment, seed, checkpoint)", "the number of episodes. 
Ith value in the result is 0 if the", "model and given episode\"\"\" return DataReader.get_model_speeds(experiment, seed, checkpoint)[episode - 1] @staticmethod def get_episode_costs(experiment,", "step): \"\"\"get the success rate for a given model\"\"\" log_file = DataReader.get_evaluation_log_file(experiment, seed,", "episodes is the number of episodes, where Ith value is the number of", "'policy_networks/' + path[1] + \\ f'-seed={seed}-novalue' + '.log' paths = glob(regex) assert len(paths)", "max_length = 0 for seed in seeds: result[seed] = [] checkpoints = DataReader.find_option_values(", "'edge') for seed in result]) steps = np.array(steps) return steps, result else: return", "for line in lines: match = regex.match(line) if match: if int(match.group(2)) == outcome:", "of episodes, where Ith value is the number of models in this experiment", "path = DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'policy_networks/' + path[1] + \\ f'-seed={seed}-novalue'", "= list(set(values)) values.sort() return values @staticmethod def get_success_rate(experiment, seed, step): \"\"\"get the success", "validation_stds), ) return result @staticmethod def get_episodes_with_outcome(experiment, seed, step, outcome): \"\"\"Gets episodes with", "'seed': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/' + path[1] + '*.log')", "checkpoint, episode): \"\"\" Returns an array of data frames with all the costs", "[] for log in logs: m = re.search(regexp, log) if m: result =", "1') return None else: raw_costs = torch.load(costs_paths[0]) # list of DataFrame, one per", "a given episode\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images", "= [] curves = 
DataReader.get_learning_curves_for_seed(experiment, seed) for i, step in enumerate(curves['steps']): train.setdefault(step, []).append(curves['train_losses'][i])", "get_success_rates_for_experiment(experiment): \"\"\"get success rate arrays for each seed for the given experiment across", "given outcome for a given model. If outcome == 1, returns successful episodes,", "experiment has to passed. if option == 'checkpoint' - returns all checkpoints for", "generated for seeds values = list(set(values)) values.sort() return values @staticmethod def get_success_rate(experiment, seed,", "given experiment across all checkpoints. The resulting shape of the np array is", "'seed' - returns all seeds for given experiment. experiment has to passed. if", "image.flatten()[mx_index] middle_x = image.shape[0] / 2 middle_y = image.shape[1] / 2 x =", "= r\"seed=(\\d+)-\" elif option == 'checkpoint': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] +", "len(result[seed])) if len(result) > 0: result = np.stack([np.pad(np.array(result[seed]), (0, max_length - len(result[seed])), 'edge')", "where episodes is the number of episodes, where Ith value is the number", "checkpoints: success = DataReader.get_episodes_with_outcome(experiment, seed, checkpoint, 1) success = np.array(success) success = success", "== 'seed' - returns all seeds for given experiment. experiment has to passed.", "# is it correct return result @staticmethod @lru_cache(maxsize=10) def get_model_states(experiment, seed, checkpoint): \"\"\"", "is 0 if the ith episode failed, and 1 otherwise. 
\"\"\" successes =", "last_line = f.readlines()[-1] last_colon = last_line.rfind(':') success_rate = float(last_line[(last_colon + 2):]) return success_rate", "static data access methods\"\"\" EXPERIMENTS_MAPPING_FILE = 'experiments_mapping.json' @staticmethod @lru_cache(maxsize=1) def get_experiments_mapping(): \"\"\"Reads the", "Ith value is the number of models in this experiment that succeeded in", "path[1] + \\ f'-seed={seed}-novaluestep{checkpoint}' + '.model.costs' costs_paths = glob(regex) if len(costs_paths) == 0:", "def get_learning_curves_for_experiment(experiment): seeds = DataReader.find_option_values('seed', experiment) result = {} steps = [] min_length", "(value, x, y) - tuple, where value is the max value of the", "episode_states)) episode_states = torch.stack(episode_states) result.append(episode_states[:, 2:].norm(dim=1)) # is it correct return result @staticmethod", "def get_experiments_mapping(): \"\"\"Reads the experiments mapping from a json file EXPERIMENTS_MAPPING_FILE \"\"\" with", "gradients for a given model evaluation on a given episode\"\"\" path = DataReader.get_experiments_mapping()[experiment][0]", "lines = f.readlines() regex = re.compile(\".*ep:\\s+(\\d+).*\\|\\ssuccess:\\s+(\\d).*\") result = [] for line in lines:", "episode\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] image_paths = f'{path}/planning_results/videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/ego/*.png' images = []", "if the ith episode failed, and 1 otherwise. 
\"\"\" successes = DataReader.get_episodes_with_outcome(experiment, seed,", "seed, step): \"\"\"Retuns a path to the eval logs for given model\"\"\" path", "match: if int(match.group(2)) == outcome: result.append(int(match.group(1))) return result @staticmethod def get_episode_success_map(experiment, seed, step):", "is not length of 1, and is equal to {paths}' return paths[0] @staticmethod", "in raw_costs] return costs @staticmethod @lru_cache(maxsize=10) def get_model_speeds(experiment, seed, checkpoint): \"\"\" Returns an", "model_name = DataReader.get_experiments_mapping()[experiment][1] gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images = [] for image_path in sorted(glob(gradient_paths)):", "path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] image_paths = f'{path}/planning_results/videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/ego/*.png' images = [] for", "success_rate = float(last_line[(last_colon + 2):]) return success_rate @staticmethod def get_success_rates_for_experiment(experiment): \"\"\"get success rate", "an array of speeds for given model and given episode\"\"\" return DataReader.get_model_speeds(experiment, seed,", "is not 1') return None else: raw_costs = torch.load(costs_paths[0]) # list of DataFrame,", "step in enumerate(curves['steps']): train.setdefault(step, []).append(curves['train_losses'][i]) validation.setdefault(step, []).append(curves['validation_losses'][i]) train_means = [] train_stds = []", "doesn\\'t contain {option}') # log files for each step are generated for seeds", "values.sort() return values @staticmethod def get_success_rate(experiment, seed, step): \"\"\"get the success rate for", "for checkpoint in checkpoints: success = DataReader.get_success_rate( experiment, seed, checkpoint) result[seed].append(success) min_length =", 
"as f: x = json.load(f) return x @staticmethod def get_images(experiment, seed, checkpoint, episode):", "= DataReader.get_learning_curves_for_seed(experiment, seed) for i, step in enumerate(curves['steps']): train.setdefault(step, []).append(curves['train_losses'][i]) validation.setdefault(step, []).append(curves['validation_losses'][i]) train_means", "image.shape[1] / 2 x = mx_index // image.shape[1] x -= middle_x y =", "= 1 return result @staticmethod def get_episodes_success_counts(experiment): \"\"\"For a given experiment, for all", "class for the static data access methods\"\"\" EXPERIMENTS_MAPPING_FILE = 'experiments_mapping.json' @staticmethod @lru_cache(maxsize=1) def", "train: train_means.append(float(np.mean(train[key]))) train_stds.append(float(np.std(train[key]))) validation_means.append(float(np.mean(validation[key]))) validation_stds.append(float(np.std(validation[key]))) result = dict( steps=list(train.keys()), train=(train_means, train_stds), validation=(validation_means, validation_stds),", "validation_losses=validation_losses, ) return result @staticmethod def get_learning_curves_for_experiment(experiment): seeds = DataReader.find_option_values('seed', experiment) result =", "train_stds = [] validation_means = [] validation_stds = [] for key in train:", "get_experiments_mapping(): \"\"\"Reads the experiments mapping from a json file EXPERIMENTS_MAPPING_FILE \"\"\" with open(DataReader.EXPERIMENTS_MAPPING_FILE,", "'checkpoint' - returns all checkpoints for given experiment and seed. experiment and seed", "across all checkpoints. The resulting shape of the np array is (seeds, checkpoints),", "logs: m = re.search(regexp, log) if m: result = m.group(1) values.append(int(result)) else: print(f'{log}", "the model and episode Returns: (value, x, y) - tuple, where value is", "result is 0 if the ith episode failed, and 1 otherwise. 
\"\"\" successes", "gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images = sorted(glob(gradient_paths)) if len(images) == 0: return (0, 0,", "2 x = mx_index // image.shape[1] x -= middle_x y = mx_index %", "# list of DataFrame, one per episode costs = [pandas.DataFrame(cost if type(cost) ==", "paths[0] @staticmethod def get_training_log_file(experiment, seed): \"\"\"Retuns a path to the eval logs for", "r'-novaluestep(\\d+)\\.' elif option == 'episode': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/videos_simulator/'", "max_length = 0 train = {} validation = {} for seed in seeds:", "regex = path[0] + 'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{checkpoint}' + '.model.costs' costs_paths", "= glob(path[0] + 'planning_results/videos_simulator/' + path[1] + f'-seed={seed}-novaluestep{checkpoint}.model/ep*') regexp = r'model/ep(\\d+)' values =", "checkpoints: success = DataReader.get_success_rate( experiment, seed, checkpoint) result[seed].append(success) min_length = min(min_length, len(result[seed])) max_length", "DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{step}' + '.model.log'", "1 result = np.zeros(EPISODES) result[successes] = 1 return result @staticmethod def get_episodes_success_counts(experiment): \"\"\"For", "for seed in seeds: checkpoints = DataReader.find_option_values( 'checkpoint', experiment, seed) for checkpoint in", "not length of 1, and is equal to {paths}' return paths[0] @staticmethod def", "class DataReader: \"\"\"Container class for the static data access methods\"\"\" EXPERIMENTS_MAPPING_FILE = 'experiments_mapping.json'", "episode): \"\"\" Returns an array of speeds for given model and given episode\"\"\"", "= torch.load(states_path) result = [] for i in range(len(states)): episode_states = states[i] 
episode_states", "be passed. if option == 'episode' - returns all episodes for given model", "y) @staticmethod def get_evaluation_log_file(experiment, seed, step): \"\"\"Retuns a path to the eval logs", "episodes\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'planning_results/' + path[1] + \\", "mapping from a json file EXPERIMENTS_MAPPING_FILE \"\"\" with open(DataReader.EXPERIMENTS_MAPPING_FILE, 'r') as f: x", "given experiment. experiment has to passed. if option == 'checkpoint' - returns all", "option == 'seed' - returns all seeds for given experiment. experiment has to", "{regex} is {costs_paths} and it\\'s length is not 1') return None else: raw_costs", "value == 0: return (0, 0, 0) else: return (value, x, y) @staticmethod", "reading images, costs and speed. \"\"\" import numpy as np from glob import", "experiment, seed, and checkpoint have to be passed. \"\"\" if option == 'seed':", "DataReader.get_evaluation_log_file(experiment, seed, step) with open(log_file, 'r') as f: last_line = f.readlines()[-1] last_colon =", "'checkpoint', experiment, seed) if len(steps) < len(checkpoints): steps = checkpoints for checkpoint in", "images.append(f.read()) return images @staticmethod def get_gradients(experiment, seed, checkpoint, episode): \"\"\"Get gradients for a", "y) - tuple, where value is the max value of the gradient, x,", "seed, and checkpoint have to be passed. 
\"\"\" if option == 'seed': path", "as np from glob import glob import torch import pandas import re import", "def get_evaluation_log_file(experiment, seed, step): \"\"\"Retuns a path to the eval logs for given", "{option}') # log files for each step are generated for seeds values =", "lines: match = regex.match(line) if match: steps.append(int(match.group(1))) train_losses.append(float(match.group(2))) validation_losses.append(float(match.group(3))) result = dict( steps=steps,", "- tuple, where value is the max value of the gradient, x, y", "steps.append(int(match.group(1))) train_losses.append(float(match.group(2))) validation_losses.append(float(match.group(3))) result = dict( steps=steps, train_losses=train_losses, validation_losses=validation_losses, ) return result @staticmethod", "@staticmethod def get_episodes_success_counts(experiment): \"\"\"For a given experiment, for all episodes checks performance of", "1, and is equal to {paths}' return paths[0] @staticmethod def get_training_log_file(experiment, seed): \"\"\"Retuns", "DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{checkpoint}' + '.model.costs'", "+ 'planning_results/videos_simulator/' + path[1] + f'-seed={seed}-novaluestep{checkpoint}.model/ep*') regexp = r'model/ep(\\d+)' values = [] for", "seed, checkpoint) result[seed].append(success) min_length = min(min_length, len(result[seed])) max_length = max(max_length, len(result[seed])) if len(result)", "of all the models with all possible seeds and checkpoints, and returns an", "= sorted(glob(gradient_paths))[-1] image = imageio.imread(image_path) mx_index = np.argmax(image) value = image.flatten()[mx_index] middle_x =", "option == 'episode': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/videos_simulator/' + path[1]", "np.sum(one_hot, axis=0), one_hot = np.squeeze(one_hot) result += one_hot return result @staticmethod def 
get_episode_speeds(experiment,", "[pandas.DataFrame(cost if type(cost) == type([]) else cost.tolist()) for cost in raw_costs] return costs", "returns all seeds for given experiment. experiment has to passed. if option ==", "the number of models in this experiment that succeeded in this episode. \"\"\"", "to the eval logs for given model\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0]", "line in lines: match = regex.match(line) if match: steps.append(int(match.group(1))) train_losses.append(float(match.group(2))) validation_losses.append(float(match.group(3))) result =", "values = [] for log in logs: m = re.search(regexp, log) if m:", "print( f'costs_paths for {regex} is {costs_paths} and it\\'s length is not 1') return", "= [] train_losses = [] validation_losses = [] for line in lines: match", "= 561 class DataReader: \"\"\"Container class for the static data access methods\"\"\" EXPERIMENTS_MAPPING_FILE", "returns all episodes for given model experiment, seed, and checkpoint have to be", "= glob(path[0] + 'planning_results/' + path[1] + f'-seed={seed}' + '*.model.log') regexp = r'-novaluestep(\\d+)\\.'", "succeeded in this episode. 
\"\"\" seeds = DataReader.find_option_values('seed', experiment) result = np.zeros(EPISODES) for", "methods\"\"\" EXPERIMENTS_MAPPING_FILE = 'experiments_mapping.json' @staticmethod @lru_cache(maxsize=1) def get_experiments_mapping(): \"\"\"Reads the experiments mapping from", "last_line.rfind(':') success_rate = float(last_line[(last_colon + 2):]) return success_rate @staticmethod def get_success_rates_for_experiment(experiment): \"\"\"get success", "for cost in raw_costs] return costs @staticmethod @lru_cache(maxsize=10) def get_model_speeds(experiment, seed, checkpoint): \"\"\"", "\"\"\"A class with static methods which can be used to access the data", "= np.sum(one_hot, axis=0), one_hot = np.squeeze(one_hot) result += one_hot return result @staticmethod def", "a given model evaluation on a given episode\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name =", "@staticmethod def get_episodes_with_outcome(experiment, seed, step, outcome): \"\"\"Gets episodes with given outcome for a", "in logs: m = re.search(regexp, log) if m: result = m.group(1) values.append(int(result)) else:", "\"\"\" Returns an array of costs for given model for all episodes\"\"\" path", "image. 
\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images =", "as f: images.append(f.read()) return images @staticmethod def get_gradients(experiment, seed, checkpoint, episode): \"\"\"Get gradients", "given episode\"\"\" path = DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images =", "path = DataReader.get_training_log_file(experiment, seed) with open(path, 'r') as f: lines = f.readlines() regex", "episodes for given model experiment, seed, and checkpoint have to be passed. \"\"\"", "rate for a given model\"\"\" log_file = DataReader.get_evaluation_log_file(experiment, seed, step) with open(log_file, 'r')", "a given experiment and seed. 
\"\"\" path = DataReader.get_training_log_file(experiment, seed) with open(path, 'r')", "last gradient for the model and episode Returns: (value, x, y) - tuple,", "not length of 1, and is equal to {paths}' return paths[0] @staticmethod @lru_cache(maxsize=100)", "episode): \"\"\"Get the last gradient for the model and episode Returns: (value, x,", "in result]) steps = np.array(steps) return steps, result else: return None, None @staticmethod", "episode_states = states[i] episode_states = list(map(lambda x: x[-1], episode_states)) episode_states = torch.stack(episode_states) result.append(episode_states)", "path = DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{step}'", "DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/videos_simulator/' + path[1] + f'-seed={seed}-novaluestep{checkpoint}.model/ep*') regexp = r'model/ep(\\d+)'", "number of episodes, where Ith value is the number of models in this", "- returns all episodes for given model experiment, seed, and checkpoint have to", "episodes. Ith value in the result is 0 if the ith episode failed,", "seed, step) with open(path, 'r') as f: lines = f.readlines() regex = re.compile(\".*ep:\\s+(\\d+).*\\|\\ssuccess:\\s+(\\d).*\")", "type([]) else cost.tolist()) for cost in raw_costs] return costs @staticmethod @lru_cache(maxsize=10) def get_model_speeds(experiment,", "Ith value in the result is 0 if the ith episode failed, and", "returns an array of shape (episodes) where episodes is the number of episodes,", "location of this max value in the gradient image. 
\"\"\" path = DataReader.get_experiments_mapping()[experiment][0]", "array is (seeds, checkpoints), where seeds is the number of seeds, and checkpints", "m = re.search(regexp, log) if m: result = m.group(1) values.append(int(result)) else: print(f'{log} doesn\\'t", "with open(path, 'r') as f: lines = f.readlines() regex = re.compile(\".*ep:\\s+(\\d+).*\\|\\ssuccess:\\s+(\\d).*\") result =", "\"\"\"Gets a 0-1 array of shape (episodes) where episodes is the number of", "checkpoint): \"\"\" Returns an array of states for given model for all episodes\"\"\"", "glob(regex) assert len(states_paths) == 1, \\ f'states_paths for {regex} is {states_paths} and it\\'s", "episodes, if outcome == 0, returns failing episodes. \"\"\" path = DataReader.get_evaluation_log_file(experiment, seed,", "if value == 0: return (0, 0, 0) else: return (value, x, y)", "= r'-novaluestep(\\d+)\\.' elif option == 'episode': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] +", "return result @staticmethod @lru_cache(maxsize=10) def get_model_states(experiment, seed, checkpoint): \"\"\" Returns an array of", "find_option_values(option, experiment=None, seed=None, checkpoint=None): \"\"\"Returns possible values for selected option. Depending on option,", "= [] for image_path in sorted(glob(image_paths)): with open(image_path, 'rb') as f: images.append(f.read()) return", "speed. \"\"\" import numpy as np from glob import glob import torch import", "0: result = np.stack([np.pad(np.array(result[seed]), (0, max_length - len(result[seed])), 'edge') for seed in result])", "- returns all seeds for given experiment. experiment has to passed. if option", "@staticmethod def get_evaluation_log_file(experiment, seed, step): \"\"\"Retuns a path to the eval logs for", "len(states_paths) == 1, \\ f'states_paths for {regex} is {states_paths} and it\\'s length is", "of models in this experiment that succeeded in this episode. 
\"\"\" seeds =", "last_colon = last_line.rfind(':') success_rate = float(last_line[(last_colon + 2):]) return success_rate @staticmethod def get_success_rates_for_experiment(experiment):", "@staticmethod def get_success_rate(experiment, seed, step): \"\"\"get the success rate for a given model\"\"\"", "episode_states = torch.stack(episode_states) result.append(episode_states[:, 2:].norm(dim=1)) # is it correct return result @staticmethod @lru_cache(maxsize=10)", "None @staticmethod def get_learning_curves_for_seed(experiment, seed): \"\"\"Gets the training and validation total losses for", "model_name = DataReader.get_experiments_mapping()[experiment][1] image_paths = f'{path}/planning_results/videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/ego/*.png' images = [] for image_path in sorted(glob(image_paths)):", "== 0: return (0, 0, 0) else: return (value, x, y) @staticmethod def", "f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images = sorted(glob(gradient_paths)) if len(images) == 0: return (0, 0, 0) image_path", "given model. If outcome == 1, returns successful episodes, if outcome == 0,", "EXPERIMENTS_MAPPING_FILE \"\"\" with open(DataReader.EXPERIMENTS_MAPPING_FILE, 'r') as f: x = json.load(f) return x @staticmethod", "list of DataFrame, one per episode costs = [pandas.DataFrame(cost if type(cost) == type([])", "@staticmethod def get_training_log_file(experiment, seed): \"\"\"Retuns a path to the eval logs for given", "get_model_speeds(experiment, seed, checkpoint): \"\"\" Returns an array of speeds for given model for", "if option == 'seed' - returns all seeds for given experiment. experiment has", "with given outcome for a given model. 
If outcome == 1, returns successful", "DataReader.get_experiments_mapping()[experiment] logs = glob(path[0] + 'planning_results/' + path[1] + '*.log') regexp = r\"seed=(\\d+)-\"", "f.readlines() regex = re.compile(\".*ep:\\s+(\\d+).*\\|\\ssuccess:\\s+(\\d).*\") result = [] for line in lines: match =", "steps = np.array(steps) return steps, result else: return None, None @staticmethod def get_learning_curves_for_seed(experiment,", "regexp = r'-novaluestep(\\d+)\\.' elif option == 'episode': path = DataReader.get_experiments_mapping()[experiment] logs = glob(path[0]", "validation_losses = [] for line in lines: match = regex.match(line) if match: steps.append(int(match.group(1)))", "result = dict( steps=list(train.keys()), train=(train_means, train_stds), validation=(validation_means, validation_stds), ) return result @staticmethod def", "one_hot = np.squeeze(one_hot) result += one_hot return result @staticmethod def get_episode_speeds(experiment, seed, checkpoint,", "= DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{step}' +", "2:].norm(dim=1)) # is it correct return result @staticmethod @lru_cache(maxsize=10) def get_model_states(experiment, seed, checkpoint):", "if costs is not None: return costs[episode - 1] else: return None @staticmethod", "get_episodes_success_counts(experiment): \"\"\"For a given experiment, for all episodes checks performance of all the", "image.shape[1] x -= middle_x y = mx_index % image.shape[1] y -= middle_y if", "states_paths[0] states = torch.load(states_path) result = [] for i in range(len(states)): episode_states =", "checkpoint, episode): \"\"\"Get simulator images for a given model evaluation on a given", "f'{path}/planning_results/videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/ego/*.png' images = [] for image_path in sorted(glob(image_paths)): with open(image_path, 'rb') as f:", "to passed. 
if option == 'checkpoint' - returns all checkpoints for given experiment", "can be used to access the data about experiments. This includes reading logs", "DataReader.get_episodes_with_outcome(experiment, seed, step, 1) successes = np.array(successes) - 1 result = np.zeros(EPISODES) result[successes]", "@staticmethod def get_episode_speeds(experiment, seed, checkpoint, episode): \"\"\" Returns an array of speeds for", "steps, result else: return None, None @staticmethod def get_learning_curves_for_seed(experiment, seed): \"\"\"Gets the training", "seeds, and checkpints is the number of checkpoints. \"\"\" seeds = DataReader.find_option_values('seed', experiment)", "return DataReader.get_model_speeds(experiment, seed, checkpoint)[episode - 1] @staticmethod def get_episode_costs(experiment, seed, checkpoint, episode): \"\"\"", "log) if m: result = m.group(1) values.append(int(result)) else: print(f'{log} doesn\\'t contain {option}') #", "values for selected option. Depending on option, returns: if option == 'seed' -", "result]) steps = np.array(steps) return steps, result else: return None, None @staticmethod def", "= r'model/ep(\\d+)' values = [] for log in logs: m = re.search(regexp, log)", "'planning_results/' + path[1] + \\ f'-seed={seed}-novaluestep{checkpoint}' + '.model.costs' costs_paths = glob(regex) if len(costs_paths)", "validation=(validation_means, validation_stds), ) return result @staticmethod def get_episodes_with_outcome(experiment, seed, step, outcome): \"\"\"Gets episodes", "@staticmethod @lru_cache(maxsize=1) def get_experiments_mapping(): \"\"\"Reads the experiments mapping from a json file EXPERIMENTS_MAPPING_FILE", "file EXPERIMENTS_MAPPING_FILE \"\"\" with open(DataReader.EXPERIMENTS_MAPPING_FILE, 'r') as f: x = json.load(f) return x", "+ 'planning_results/' + path[1] + f'-seed={seed}' + '*.model.log') regexp = r'-novaluestep(\\d+)\\.' 
elif option", "middle_y if value == 0: return (0, 0, 0) else: return (value, x,", "f: images.append(f.read()) return images @staticmethod def get_gradients(experiment, seed, checkpoint, episode): \"\"\"Get gradients for", "The resulting shape of the np array is (seeds, checkpoints), where seeds is", "are generated for seeds values = list(set(values)) values.sort() return values @staticmethod def get_success_rate(experiment,", "the given experiment across all checkpoints. The resulting shape of the np array", "@staticmethod @lru_cache(maxsize=10) def get_model_states(experiment, seed, checkpoint): \"\"\" Returns an array of states for", "import torch import pandas import re import json from functools import lru_cache import", "glob(regex) assert len(paths) == 1, \\ f'paths for {regex} is not length of", "open(log_file, 'r') as f: last_line = f.readlines()[-1] last_colon = last_line.rfind(':') success_rate = float(last_line[(last_colon", "logs for given model\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'planning_results/' +", "get_model_states(experiment, seed, checkpoint): \"\"\" Returns an array of states for given model for", "seed. \"\"\" path = DataReader.get_training_log_file(experiment, seed) with open(path, 'r') as f: lines =", "0-1 array of shape (episodes) where episodes is the number of episodes. Ith", "f.readlines() regex = re.compile(\".*step\\s(\\d+).*\\s\\[.*\\π\\:\\s(.*)\\].*\\[.*\\π\\:\\s(.*)\\]\") steps = [] train_losses = [] validation_losses = []", "image.shape[1] y -= middle_y if value == 0: return (0, 0, 0) else:", "the experiments mapping from a json file EXPERIMENTS_MAPPING_FILE \"\"\" with open(DataReader.EXPERIMENTS_MAPPING_FILE, 'r') as", "and checkpoint have to be passed. 
\"\"\" if option == 'seed': path =", "\\ f'-seed={seed}-novaluestep{checkpoint}' + '.model.states' states_paths = glob(regex) assert len(states_paths) == 1, \\ f'states_paths", "\"\"\"Gets the training and validation total losses for a given experiment and seed.", "f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images = [] for image_path in sorted(glob(gradient_paths)): with open(image_path, 'rb') as f:", "(episodes) where episodes is the number of episodes. Ith value in the result", "model\"\"\" path = DataReader.get_experiments_mapping()[experiment] regex = path[0] + 'planning_results/' + path[1] + \\", "value is the max value of the gradient, x, y are the location", "= [] for log in logs: m = re.search(regexp, log) if m: result", "type(cost) == type([]) else cost.tolist()) for cost in raw_costs] return costs @staticmethod @lru_cache(maxsize=10)", "checkpoint in checkpoints: success = DataReader.get_episodes_with_outcome(experiment, seed, checkpoint, 1) success = np.array(success) success", "seed, checkpoint): \"\"\" Returns an array of speeds for given model for all", "= DataReader.get_experiments_mapping()[experiment][0] model_name = DataReader.get_experiments_mapping()[experiment][1] gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png' images = [] for image_path", "train_means = [] train_stds = [] validation_means = [] validation_stds = [] for", "return success_rate @staticmethod def get_success_rates_for_experiment(experiment): \"\"\"get success rate arrays for each seed for", "assert len(paths) == 1, \\ f'paths for {regex} is not length of 1,", "@lru_cache(maxsize=10) def get_model_costs(experiment, seed, checkpoint): \"\"\" Returns an array of costs for given", "== 1, \\ f'states_paths for {regex} is {states_paths} and it\\'s length is not", "value of the gradient, x, y are the location of this max 
value", "outcome for a given model. If outcome == 1, returns successful episodes, if", "and checkpints is the number of checkpoints. \"\"\" seeds = DataReader.find_option_values('seed', experiment) result", "\"\"\" Returns an array of speeds for given model and given episode\"\"\" return", "= m.group(1) values.append(int(result)) else: print(f'{log} doesn\\'t contain {option}') # log files for each", "logs = glob(path[0] + 'planning_results/videos_simulator/' + path[1] + f'-seed={seed}-novaluestep{checkpoint}.model/ep*') regexp = r'model/ep(\\d+)' values", "train = {} validation = {} for seed in seeds: result[seed] = []", "train_stds.append(float(np.std(train[key]))) validation_means.append(float(np.mean(validation[key]))) validation_stds.append(float(np.std(validation[key]))) result = dict( steps=list(train.keys()), train=(train_means, train_stds), validation=(validation_means, validation_stds), ) return", "= np.zeros((len(success), EPISODES)) one_hot[np.arange(len(success)), success] = 1 one_hot = np.sum(one_hot, axis=0), one_hot =", "[] min_length = 100 max_length = 0 for seed in seeds: result[seed] =", "[] for image_path in sorted(glob(image_paths)): with open(image_path, 'rb') as f: images.append(f.read()) return images", "a 0-1 array of shape (episodes) where episodes is the number of episodes.", "return values @staticmethod def get_success_rate(experiment, seed, step): \"\"\"get the success rate for a", "correct return result @staticmethod @lru_cache(maxsize=10) def get_model_states(experiment, seed, checkpoint): \"\"\" Returns an array", "{} steps = [] min_length = 100 max_length = 0 train = {}", "\"\"\"Get the last gradient for the model and episode Returns: (value, x, y)", "experiment and seed. experiment and seed have to be passed. if option ==" ]
[ "\"GigabitEthernet1/0/32\": { \"if_name\": \"GigabitEthernet1/0/32\", \"port_id\": { \"222\": { \"neighbors\": { \"not advertised\": {", "{ \"not advertised\": { \"neighbor_id\": \"not advertised\", \"chassis_id\": \"FE80::EC22:9A75:BBC7:71AF\", \"port_id\": \"222\", \"port_description\": \"Description\",", "\"if_name\": \"GigabitEthernet1/0/32\", \"port_id\": { \"222\": { \"neighbors\": { \"not advertised\": { \"neighbor_id\": \"not", "\"not advertised\", \"chassis_id\": \"FE80::EC22:9A75:BBC7:71AF\", \"port_id\": \"222\", \"port_description\": \"Description\", \"system_name\": \"not advertised\", \"system_description\": '{\"SN\":\"SN-NR\",\"Owner\":\"OWNER\"}',", "expected_output = { \"interfaces\": { \"GigabitEthernet1/0/32\": { \"if_name\": \"GigabitEthernet1/0/32\", \"port_id\": { \"222\": {", "\"port_id\": \"222\", \"port_description\": \"Description\", \"system_name\": \"not advertised\", \"system_description\": '{\"SN\":\"SN-NR\",\"Owner\":\"OWNER\"}', \"time_remaining\": 92, \"management_address\": \"0000:0000:0000:0000:0000:ffff:7f00:0001\",", "\"neighbors\": { \"not advertised\": { \"neighbor_id\": \"not advertised\", \"chassis_id\": \"FE80::EC22:9A75:BBC7:71AF\", \"port_id\": \"222\", \"port_description\":", "'{\"SN\":\"SN-NR\",\"Owner\":\"OWNER\"}', \"time_remaining\": 92, \"management_address\": \"0000:0000:0000:0000:0000:ffff:7f00:0001\", \"auto_negotiation\": \"not supported\", } } } }, }", "\"management_address\": \"0000:0000:0000:0000:0000:ffff:7f00:0001\", \"auto_negotiation\": \"not supported\", } } } }, } }, \"total_entries\": 1,", "{ \"222\": { \"neighbors\": { \"not advertised\": { \"neighbor_id\": \"not advertised\", \"chassis_id\": \"FE80::EC22:9A75:BBC7:71AF\",", "\"GigabitEthernet1/0/32\", \"port_id\": { \"222\": { \"neighbors\": { \"not advertised\": { \"neighbor_id\": \"not advertised\",", "advertised\", \"system_description\": '{\"SN\":\"SN-NR\",\"Owner\":\"OWNER\"}', \"time_remaining\": 92, \"management_address\": 
\"0000:0000:0000:0000:0000:ffff:7f00:0001\", \"auto_negotiation\": \"not supported\", } } }", "\"system_name\": \"not advertised\", \"system_description\": '{\"SN\":\"SN-NR\",\"Owner\":\"OWNER\"}', \"time_remaining\": 92, \"management_address\": \"0000:0000:0000:0000:0000:ffff:7f00:0001\", \"auto_negotiation\": \"not supported\", }", "\"222\": { \"neighbors\": { \"not advertised\": { \"neighbor_id\": \"not advertised\", \"chassis_id\": \"FE80::EC22:9A75:BBC7:71AF\", \"port_id\":", "\"0000:0000:0000:0000:0000:ffff:7f00:0001\", \"auto_negotiation\": \"not supported\", } } } }, } }, \"total_entries\": 1, }", "{ \"if_name\": \"GigabitEthernet1/0/32\", \"port_id\": { \"222\": { \"neighbors\": { \"not advertised\": { \"neighbor_id\":", "{ \"GigabitEthernet1/0/32\": { \"if_name\": \"GigabitEthernet1/0/32\", \"port_id\": { \"222\": { \"neighbors\": { \"not advertised\":", "{ \"neighbors\": { \"not advertised\": { \"neighbor_id\": \"not advertised\", \"chassis_id\": \"FE80::EC22:9A75:BBC7:71AF\", \"port_id\": \"222\",", "\"not advertised\": { \"neighbor_id\": \"not advertised\", \"chassis_id\": \"FE80::EC22:9A75:BBC7:71AF\", \"port_id\": \"222\", \"port_description\": \"Description\", \"system_name\":", "\"222\", \"port_description\": \"Description\", \"system_name\": \"not advertised\", \"system_description\": '{\"SN\":\"SN-NR\",\"Owner\":\"OWNER\"}', \"time_remaining\": 92, \"management_address\": \"0000:0000:0000:0000:0000:ffff:7f00:0001\", \"auto_negotiation\":", "\"Description\", \"system_name\": \"not advertised\", \"system_description\": '{\"SN\":\"SN-NR\",\"Owner\":\"OWNER\"}', \"time_remaining\": 92, \"management_address\": \"0000:0000:0000:0000:0000:ffff:7f00:0001\", \"auto_negotiation\": \"not supported\",", "\"not advertised\", \"system_description\": '{\"SN\":\"SN-NR\",\"Owner\":\"OWNER\"}', \"time_remaining\": 92, \"management_address\": \"0000:0000:0000:0000:0000:ffff:7f00:0001\", \"auto_negotiation\": \"not supported\", } }", "\"interfaces\": { 
\"GigabitEthernet1/0/32\": { \"if_name\": \"GigabitEthernet1/0/32\", \"port_id\": { \"222\": { \"neighbors\": { \"not", "{ \"interfaces\": { \"GigabitEthernet1/0/32\": { \"if_name\": \"GigabitEthernet1/0/32\", \"port_id\": { \"222\": { \"neighbors\": {", "\"port_description\": \"Description\", \"system_name\": \"not advertised\", \"system_description\": '{\"SN\":\"SN-NR\",\"Owner\":\"OWNER\"}', \"time_remaining\": 92, \"management_address\": \"0000:0000:0000:0000:0000:ffff:7f00:0001\", \"auto_negotiation\": \"not", "92, \"management_address\": \"0000:0000:0000:0000:0000:ffff:7f00:0001\", \"auto_negotiation\": \"not supported\", } } } }, } }, \"total_entries\":", "\"neighbor_id\": \"not advertised\", \"chassis_id\": \"FE80::EC22:9A75:BBC7:71AF\", \"port_id\": \"222\", \"port_description\": \"Description\", \"system_name\": \"not advertised\", \"system_description\":", "\"system_description\": '{\"SN\":\"SN-NR\",\"Owner\":\"OWNER\"}', \"time_remaining\": 92, \"management_address\": \"0000:0000:0000:0000:0000:ffff:7f00:0001\", \"auto_negotiation\": \"not supported\", } } } },", "= { \"interfaces\": { \"GigabitEthernet1/0/32\": { \"if_name\": \"GigabitEthernet1/0/32\", \"port_id\": { \"222\": { \"neighbors\":", "\"time_remaining\": 92, \"management_address\": \"0000:0000:0000:0000:0000:ffff:7f00:0001\", \"auto_negotiation\": \"not supported\", } } } }, } },", "\"port_id\": { \"222\": { \"neighbors\": { \"not advertised\": { \"neighbor_id\": \"not advertised\", \"chassis_id\":", "\"FE80::EC22:9A75:BBC7:71AF\", \"port_id\": \"222\", \"port_description\": \"Description\", \"system_name\": \"not advertised\", \"system_description\": '{\"SN\":\"SN-NR\",\"Owner\":\"OWNER\"}', \"time_remaining\": 92, \"management_address\":", "advertised\", \"chassis_id\": \"FE80::EC22:9A75:BBC7:71AF\", \"port_id\": \"222\", \"port_description\": \"Description\", \"system_name\": \"not advertised\", \"system_description\": '{\"SN\":\"SN-NR\",\"Owner\":\"OWNER\"}', \"time_remaining\":", 
"\"chassis_id\": \"FE80::EC22:9A75:BBC7:71AF\", \"port_id\": \"222\", \"port_description\": \"Description\", \"system_name\": \"not advertised\", \"system_description\": '{\"SN\":\"SN-NR\",\"Owner\":\"OWNER\"}', \"time_remaining\": 92,", "advertised\": { \"neighbor_id\": \"not advertised\", \"chassis_id\": \"FE80::EC22:9A75:BBC7:71AF\", \"port_id\": \"222\", \"port_description\": \"Description\", \"system_name\": \"not", "{ \"neighbor_id\": \"not advertised\", \"chassis_id\": \"FE80::EC22:9A75:BBC7:71AF\", \"port_id\": \"222\", \"port_description\": \"Description\", \"system_name\": \"not advertised\"," ]
[ "from numpy import array, zeros, dot if __name__ == \"__main__\": factor = 2", "= (dict_size, dimension) ) M = zeros( (n_examples, dict_size + 1) ) M[:,", "5 target_sparsity = 3 n_examples = 10 dimension = 4 rs = rn.RandomState(0)", "M = zeros( (n_examples, dict_size + 1) ) M[:, :target_sparsity] = rs.normal(size =", "ksvd import KSVD import numpy.random as rn from numpy import array, zeros, dot", "(n_examples, dict_size + 1) ) M[:, :target_sparsity] = rs.normal(size = (n_examples, target_sparsity) )", "rs.normal(size = (dict_size, dimension) ) M = zeros( (n_examples, dict_size + 1) )", "= 4 rs = rn.RandomState(0) D = rs.normal(size = (dict_size, dimension) ) M", "zeros( (n_examples, dict_size + 1) ) M[:, :target_sparsity] = rs.normal(size = (n_examples, target_sparsity)", "= (n_examples, target_sparsity) ) M = M.ravel()[:n_examples*dict_size].reshape(n_examples, dict_size) X = dot(M, D) KSVD(X,", "numpy.random as rn from numpy import array, zeros, dot if __name__ == \"__main__\":", "dimension) ) M = zeros( (n_examples, dict_size + 1) ) M[:, :target_sparsity] =", "target_sparsity) ) M = M.ravel()[:n_examples*dict_size].reshape(n_examples, dict_size) X = dot(M, D) KSVD(X, dict_size, target_sparsity,", "n_examples = 10 dimension = 4 rs = rn.RandomState(0) D = rs.normal(size =", "target_sparsity = 3 n_examples = 10 dimension = 4 rs = rn.RandomState(0) D", "zeros, dot if __name__ == \"__main__\": factor = 2 dict_size = 5 target_sparsity", "import KSVD import numpy.random as rn from numpy import array, zeros, dot if", "dimension = 4 rs = rn.RandomState(0) D = rs.normal(size = (dict_size, dimension) )", "import numpy.random as rn from numpy import array, zeros, dot if __name__ ==", "10 dimension = 4 rs = rn.RandomState(0) D = rs.normal(size = (dict_size, dimension)", "= 2 dict_size = 5 target_sparsity = 3 n_examples = 10 dimension =", "dot if __name__ == \"__main__\": factor = 2 dict_size = 5 target_sparsity =", "M[:, :target_sparsity] = rs.normal(size = (n_examples, 
target_sparsity) ) M = M.ravel()[:n_examples*dict_size].reshape(n_examples, dict_size) X", "\"__main__\": factor = 2 dict_size = 5 target_sparsity = 3 n_examples = 10", "array, zeros, dot if __name__ == \"__main__\": factor = 2 dict_size = 5", "3 n_examples = 10 dimension = 4 rs = rn.RandomState(0) D = rs.normal(size", "1) ) M[:, :target_sparsity] = rs.normal(size = (n_examples, target_sparsity) ) M = M.ravel()[:n_examples*dict_size].reshape(n_examples,", "== \"__main__\": factor = 2 dict_size = 5 target_sparsity = 3 n_examples =", "= 5 target_sparsity = 3 n_examples = 10 dimension = 4 rs =", ") M[:, :target_sparsity] = rs.normal(size = (n_examples, target_sparsity) ) M = M.ravel()[:n_examples*dict_size].reshape(n_examples, dict_size)", "D = rs.normal(size = (dict_size, dimension) ) M = zeros( (n_examples, dict_size +", "= rn.RandomState(0) D = rs.normal(size = (dict_size, dimension) ) M = zeros( (n_examples,", "2 dict_size = 5 target_sparsity = 3 n_examples = 10 dimension = 4", "as rn from numpy import array, zeros, dot if __name__ == \"__main__\": factor", "rs = rn.RandomState(0) D = rs.normal(size = (dict_size, dimension) ) M = zeros(", "factor = 2 dict_size = 5 target_sparsity = 3 n_examples = 10 dimension", "rs.normal(size = (n_examples, target_sparsity) ) M = M.ravel()[:n_examples*dict_size].reshape(n_examples, dict_size) X = dot(M, D)", "import array, zeros, dot if __name__ == \"__main__\": factor = 2 dict_size =", "(n_examples, target_sparsity) ) M = M.ravel()[:n_examples*dict_size].reshape(n_examples, dict_size) X = dot(M, D) KSVD(X, dict_size,", "KSVD import numpy.random as rn from numpy import array, zeros, dot if __name__", "rn from numpy import array, zeros, dot if __name__ == \"__main__\": factor =", "= 3 n_examples = 10 dimension = 4 rs = rn.RandomState(0) D =", "from ksvd import KSVD import numpy.random as rn from numpy import array, zeros,", "= rs.normal(size = (dict_size, dimension) ) M = zeros( (n_examples, dict_size + 1)", "= zeros( (n_examples, 
dict_size + 1) ) M[:, :target_sparsity] = rs.normal(size = (n_examples,", "dict_size = 5 target_sparsity = 3 n_examples = 10 dimension = 4 rs", "+ 1) ) M[:, :target_sparsity] = rs.normal(size = (n_examples, target_sparsity) ) M =", "= rs.normal(size = (n_examples, target_sparsity) ) M = M.ravel()[:n_examples*dict_size].reshape(n_examples, dict_size) X = dot(M,", ") M = zeros( (n_examples, dict_size + 1) ) M[:, :target_sparsity] = rs.normal(size", "= 10 dimension = 4 rs = rn.RandomState(0) D = rs.normal(size = (dict_size,", "__name__ == \"__main__\": factor = 2 dict_size = 5 target_sparsity = 3 n_examples", "dict_size + 1) ) M[:, :target_sparsity] = rs.normal(size = (n_examples, target_sparsity) ) M", "(dict_size, dimension) ) M = zeros( (n_examples, dict_size + 1) ) M[:, :target_sparsity]", ") M = M.ravel()[:n_examples*dict_size].reshape(n_examples, dict_size) X = dot(M, D) KSVD(X, dict_size, target_sparsity, 1000)", "<filename>tests/test_ksvd_simple.py from ksvd import KSVD import numpy.random as rn from numpy import array,", "4 rs = rn.RandomState(0) D = rs.normal(size = (dict_size, dimension) ) M =", "rn.RandomState(0) D = rs.normal(size = (dict_size, dimension) ) M = zeros( (n_examples, dict_size", "if __name__ == \"__main__\": factor = 2 dict_size = 5 target_sparsity = 3", "numpy import array, zeros, dot if __name__ == \"__main__\": factor = 2 dict_size", ":target_sparsity] = rs.normal(size = (n_examples, target_sparsity) ) M = M.ravel()[:n_examples*dict_size].reshape(n_examples, dict_size) X =" ]
[ "\".join(\"{}: {}\".format(k, getattr(self, k)) for k in self.__dict__.keys()) def __str__(self): return self.__class__.__name__ +", "ListNode: if isinstance(l1, list): l1 = ListNode(l1) l2 = ListNode(l2) if isinstance(l1, int):", "None cur = self for i in val[1:]: cur.next = ListNode(i) cur =", "x = l1.val if l1 else 0 y = l2.val if l2 else", "ListNode(l2) carry = 0 restemper = ListNode(0) res = restemper while l1 or", "carry = 0 restemper = ListNode(0) res = restemper while l1 or l2:", "{\" + \"{}\".format(self.gatherAttrs()) + \"}\" class Solution: def addTwoNumbers(self, l1: ListNode, l2: ListNode)", "+ \"}\" class Solution: def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode: if", "l2: l2 = l2.next if carry > 0: restemper.next = carry return res.next", "res = restemper while l1 or l2: x = l1.val if l1 else", "0 y = l2.val if l2 else 0 s = x + y", "self for i in val[1:]: cur.next = ListNode(i) cur = cur.next def gatherAttrs(self):", "gatherAttrs(self): return \", \".join(\"{}: {}\".format(k, getattr(self, k)) for k in self.__dict__.keys()) def __str__(self):", "for i in val[1:]: cur.next = ListNode(i) cur = cur.next def gatherAttrs(self): return", "__str__(self): return self.__class__.__name__ + \" {\" + \"{}\".format(self.gatherAttrs()) + \"}\" class Solution: def", "ListNode(0) res = restemper while l1 or l2: x = l1.val if l1", "l1 else 0 y = l2.val if l2 else 0 s = x", "def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode: if isinstance(l1, list): l1 =", "= ListNode(l2) if isinstance(l1, int): l1 = ListNode(l1) l2 = ListNode(l2) carry =", "def __init__(self, val): if isinstance(val, int): self.val = val self.next = None elif", "restemper.next = carry return res.next # @lc code=end if __name__ == \"__main__\": test", "0 s = x + y + carry carry = s // 10", "= l2.val if l2 else 0 s = x + y + carry", "ListNode(): def __init__(self, val): if isinstance(val, int): self.val = val self.next = None", "// 10 restemper.next = ListNode(s % 10) 
restemper = restemper.next if l1: l1", "ListNode(l1) l2 = ListNode(l2) if isinstance(l1, int): l1 = ListNode(l1) l2 = ListNode(l2)", "val): if isinstance(val, int): self.val = val self.next = None elif isinstance(val, list):", "elif isinstance(val, list): self.val = val[0] self.next = None cur = self for", "l2 = ListNode(l2) if isinstance(l1, int): l1 = ListNode(l1) l2 = ListNode(l2) carry", "def __str__(self): return self.__class__.__name__ + \" {\" + \"{}\".format(self.gatherAttrs()) + \"}\" class Solution:", "+ \" {\" + \"{}\".format(self.gatherAttrs()) + \"}\" class Solution: def addTwoNumbers(self, l1: ListNode,", "% 10) restemper = restemper.next if l1: l1 = l1.next if l2: l2", "l2.val if l2 else 0 s = x + y + carry carry", "val[0] self.next = None cur = self for i in val[1:]: cur.next =", "= val self.next = None elif isinstance(val, list): self.val = val[0] self.next =", "if isinstance(l1, int): l1 = ListNode(l1) l2 = ListNode(l2) carry = 0 restemper", "10 restemper.next = ListNode(s % 10) restemper = restemper.next if l1: l1 =", "for k in self.__dict__.keys()) def __str__(self): return self.__class__.__name__ + \" {\" + \"{}\".format(self.gatherAttrs())", "else 0 s = x + y + carry carry = s //", "carry return res.next # @lc code=end if __name__ == \"__main__\": test = Solution()", "ListNode) -> ListNode: if isinstance(l1, list): l1 = ListNode(l1) l2 = ListNode(l2) if", "carry > 0: restemper.next = carry return res.next # @lc code=end if __name__", "= restemper while l1 or l2: x = l1.val if l1 else 0", "int): l1 = ListNode(l1) l2 = ListNode(l2) carry = 0 restemper = ListNode(0)", "= s // 10 restemper.next = ListNode(s % 10) restemper = restemper.next if", "getattr(self, k)) for k in self.__dict__.keys()) def __str__(self): return self.__class__.__name__ + \" {\"", "in self.__dict__.keys()) def __str__(self): return self.__class__.__name__ + \" {\" + \"{}\".format(self.gatherAttrs()) + \"}\"", "y + carry carry = s // 10 restemper.next = ListNode(s % 10)", "= 
None elif isinstance(val, list): self.val = val[0] self.next = None cur =", "> 0: restemper.next = carry return res.next # @lc code=end if __name__ ==", "ListNode(i) cur = cur.next def gatherAttrs(self): return \", \".join(\"{}: {}\".format(k, getattr(self, k)) for", "k in self.__dict__.keys()) def __str__(self): return self.__class__.__name__ + \" {\" + \"{}\".format(self.gatherAttrs()) +", "l1 or l2: x = l1.val if l1 else 0 y = l2.val", "else 0 y = l2.val if l2 else 0 s = x +", "self.next = None cur = self for i in val[1:]: cur.next = ListNode(i)", "l1 = ListNode(l1) l2 = ListNode(l2) carry = 0 restemper = ListNode(0) res", "0 restemper = ListNode(0) res = restemper while l1 or l2: x =", "if isinstance(l1, list): l1 = ListNode(l1) l2 = ListNode(l2) if isinstance(l1, int): l1", "= ListNode(l1) l2 = ListNode(l2) if isinstance(l1, int): l1 = ListNode(l1) l2 =", "+ carry carry = s // 10 restemper.next = ListNode(s % 10) restemper", "list): l1 = ListNode(l1) l2 = ListNode(l2) if isinstance(l1, int): l1 = ListNode(l1)", "+ \"{}\".format(self.gatherAttrs()) + \"}\" class Solution: def addTwoNumbers(self, l1: ListNode, l2: ListNode) ->", "restemper = ListNode(0) res = restemper while l1 or l2: x = l1.val", "0: restemper.next = carry return res.next # @lc code=end if __name__ == \"__main__\":", "if l2: l2 = l2.next if carry > 0: restemper.next = carry return", "isinstance(l1, list): l1 = ListNode(l1) l2 = ListNode(l2) if isinstance(l1, int): l1 =", "isinstance(val, list): self.val = val[0] self.next = None cur = self for i", "val self.next = None elif isinstance(val, list): self.val = val[0] self.next = None", "= ListNode(i) cur = cur.next def gatherAttrs(self): return \", \".join(\"{}: {}\".format(k, getattr(self, k))", "+ y + carry carry = s // 10 restemper.next = ListNode(s %", "return \", \".join(\"{}: {}\".format(k, getattr(self, k)) for k in self.__dict__.keys()) def __str__(self): return", "return self.__class__.__name__ + \" {\" + \"{}\".format(self.gatherAttrs()) 
+ \"}\" class Solution: def addTwoNumbers(self,", "if l1 else 0 y = l2.val if l2 else 0 s =", "carry = s // 10 restemper.next = ListNode(s % 10) restemper = restemper.next", "if isinstance(val, int): self.val = val self.next = None elif isinstance(val, list): self.val", "= ListNode(l1) l2 = ListNode(l2) carry = 0 restemper = ListNode(0) res =", "\" {\" + \"{}\".format(self.gatherAttrs()) + \"}\" class Solution: def addTwoNumbers(self, l1: ListNode, l2:", "cur = self for i in val[1:]: cur.next = ListNode(i) cur = cur.next", "restemper.next if l1: l1 = l1.next if l2: l2 = l2.next if carry", "= ListNode(s % 10) restemper = restemper.next if l1: l1 = l1.next if", "addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode: if isinstance(l1, list): l1 = ListNode(l1)", "= ListNode(0) res = restemper while l1 or l2: x = l1.val if", "= l1.next if l2: l2 = l2.next if carry > 0: restemper.next =", "l1.val if l1 else 0 y = l2.val if l2 else 0 s", "None elif isinstance(val, list): self.val = val[0] self.next = None cur = self", "s // 10 restemper.next = ListNode(s % 10) restemper = restemper.next if l1:", "if carry > 0: restemper.next = carry return res.next # @lc code=end if", "= l2.next if carry > 0: restemper.next = carry return res.next # @lc", "= self for i in val[1:]: cur.next = ListNode(i) cur = cur.next def", "in val[1:]: cur.next = ListNode(i) cur = cur.next def gatherAttrs(self): return \", \".join(\"{}:", "= cur.next def gatherAttrs(self): return \", \".join(\"{}: {}\".format(k, getattr(self, k)) for k in", "ListNode(s % 10) restemper = restemper.next if l1: l1 = l1.next if l2:", "cur.next = ListNode(i) cur = cur.next def gatherAttrs(self): return \", \".join(\"{}: {}\".format(k, getattr(self,", "l2 = ListNode(l2) carry = 0 restemper = ListNode(0) res = restemper while", "cur = cur.next def gatherAttrs(self): return \", \".join(\"{}: {}\".format(k, getattr(self, k)) for k", "= 0 restemper = ListNode(0) res = restemper while l1 or l2: x", "= ListNode(l2) carry = 0 
restemper = ListNode(0) res = restemper while l1", "restemper = restemper.next if l1: l1 = l1.next if l2: l2 = l2.next", "class ListNode(): def __init__(self, val): if isinstance(val, int): self.val = val self.next =", "k)) for k in self.__dict__.keys()) def __str__(self): return self.__class__.__name__ + \" {\" +", "ListNode(l1) l2 = ListNode(l2) carry = 0 restemper = ListNode(0) res = restemper", "carry carry = s // 10 restemper.next = ListNode(s % 10) restemper =", "i in val[1:]: cur.next = ListNode(i) cur = cur.next def gatherAttrs(self): return \",", "int): self.val = val self.next = None elif isinstance(val, list): self.val = val[0]", "isinstance(val, int): self.val = val self.next = None elif isinstance(val, list): self.val =", "Solution: def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode: if isinstance(l1, list): l1", "l1.next if l2: l2 = l2.next if carry > 0: restemper.next = carry", "return res.next # @lc code=end if __name__ == \"__main__\": test = Solution() print(test.addTwoNumbers([5,6],", "x + y + carry carry = s // 10 restemper.next = ListNode(s", "list): self.val = val[0] self.next = None cur = self for i in", "if l1: l1 = l1.next if l2: l2 = l2.next if carry >", "l1 = ListNode(l1) l2 = ListNode(l2) if isinstance(l1, int): l1 = ListNode(l1) l2", "restemper.next = ListNode(s % 10) restemper = restemper.next if l1: l1 = l1.next", "l2 else 0 s = x + y + carry carry = s", "cur.next def gatherAttrs(self): return \", \".join(\"{}: {}\".format(k, getattr(self, k)) for k in self.__dict__.keys())", "= val[0] self.next = None cur = self for i in val[1:]: cur.next", "self.__dict__.keys()) def __str__(self): return self.__class__.__name__ + \" {\" + \"{}\".format(self.gatherAttrs()) + \"}\" class", "ListNode(l2) if isinstance(l1, int): l1 = ListNode(l1) l2 = ListNode(l2) carry = 0", "while l1 or l2: x = l1.val if l1 else 0 y =", "l1: l1 = l1.next if l2: l2 = l2.next if carry > 0:", "res.next # @lc code=end if __name__ == \"__main__\": test = 
Solution() print(test.addTwoNumbers([5,6], [5,7]))", "val[1:]: cur.next = ListNode(i) cur = cur.next def gatherAttrs(self): return \", \".join(\"{}: {}\".format(k,", "s = x + y + carry carry = s // 10 restemper.next", "l2: x = l1.val if l1 else 0 y = l2.val if l2", "= l1.val if l1 else 0 y = l2.val if l2 else 0", "isinstance(l1, int): l1 = ListNode(l1) l2 = ListNode(l2) carry = 0 restemper =", "l2 = l2.next if carry > 0: restemper.next = carry return res.next #", "l1 = l1.next if l2: l2 = l2.next if carry > 0: restemper.next", "l2.next if carry > 0: restemper.next = carry return res.next # @lc code=end", "l2: ListNode) -> ListNode: if isinstance(l1, list): l1 = ListNode(l1) l2 = ListNode(l2)", "= carry return res.next # @lc code=end if __name__ == \"__main__\": test =", "\"{}\".format(self.gatherAttrs()) + \"}\" class Solution: def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:", "__init__(self, val): if isinstance(val, int): self.val = val self.next = None elif isinstance(val,", "\", \".join(\"{}: {}\".format(k, getattr(self, k)) for k in self.__dict__.keys()) def __str__(self): return self.__class__.__name__", "self.val = val self.next = None elif isinstance(val, list): self.val = val[0] self.next", "-> ListNode: if isinstance(l1, list): l1 = ListNode(l1) l2 = ListNode(l2) if isinstance(l1,", "if l2 else 0 s = x + y + carry carry =", "self.__class__.__name__ + \" {\" + \"{}\".format(self.gatherAttrs()) + \"}\" class Solution: def addTwoNumbers(self, l1:", "ListNode, l2: ListNode) -> ListNode: if isinstance(l1, list): l1 = ListNode(l1) l2 =", "class Solution: def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode: if isinstance(l1, list):", "{}\".format(k, getattr(self, k)) for k in self.__dict__.keys()) def __str__(self): return self.__class__.__name__ + \"", "restemper while l1 or l2: x = l1.val if l1 else 0 y", "self.next = None elif isinstance(val, list): self.val = val[0] self.next = None cur", "y = l2.val if l2 else 0 s = x + y +", 
"def gatherAttrs(self): return \", \".join(\"{}: {}\".format(k, getattr(self, k)) for k in self.__dict__.keys()) def", "or l2: x = l1.val if l1 else 0 y = l2.val if", "= x + y + carry carry = s // 10 restemper.next =", "10) restemper = restemper.next if l1: l1 = l1.next if l2: l2 =", "= restemper.next if l1: l1 = l1.next if l2: l2 = l2.next if", "self.val = val[0] self.next = None cur = self for i in val[1:]:", "= None cur = self for i in val[1:]: cur.next = ListNode(i) cur", "\"}\" class Solution: def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode: if isinstance(l1,", "l1: ListNode, l2: ListNode) -> ListNode: if isinstance(l1, list): l1 = ListNode(l1) l2" ]
[ "_, question in self.get_questions_instances() ] if len(questions_instances) != len(questions): raise \"must provide all", "associated with this exam\" question[\"validate\"] = True for answer_instance, answer in zip(answers_instances, question[\"Answers\"]):", "questions_instances = [ question for _, question in self.get_questions_instances() ] if len(questions_instances) !=", "from operator import itemgetter from users.models import User from django.db import models import", "question in user_answer.values(): question[\"id\"] = int(question[\"id\"]) return user_answer def time(self): return timezone.now() -", "from django.urls import reverse from operator import itemgetter from users.models import User from", "in answerlistForms: answer_instance = answerForm.save(commit=False) answer_instance.exam = familyInstance[\"Exam\"] answer_instance.question = self answer_instance.save() def", "= models.DateTimeField(default=None, blank=True, null=True) def __str__(self): return self.exam.name + \": \" + self.user.get_full_name()", "False return True def get_user_answer(self): user_answer = ast.literal_eval(self.user_answer) del user_answer[\"approved\"] for question in", "\" + self.question def save(self, *args, **kwargs): answerlistForms = kwargs.pop(\"answerlist\", None) familyInstance =", "as _ from django.core.exceptions import ObjectDoesNotExist from courses.models import Content from django.utils import", "for en integer part and 2 for decimal part\" ), error_messages={ \"blank\": (\"you", "= models.DecimalField( (\"points\"), default=None, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], help_text=('Max 5 digits: same as", "approved\"'), # help_text=('point value of the question. 
Max 5 digits: 3 for en", "sorted(question[\"Answers\"], key=itemgetter(0)) questions_instances = [ question for _, question in self.get_questions_instances() ] if", "60): self.number_of_try = 0 self.save() return True return False return True def get_user_answer(self):", "help_text=(\"Required. 150 characters or fewer.\"), ) description = models.TextField() approved = models.DecimalField( (\"minimun", "courses.models import Content from django.utils import timezone from django.urls import reverse from operator", "the minimum points necessary to pass this exam. Max 5 digits: 3 for", "5 digits: same as \"minimun points to approved\"'), # help_text=('point value of the", "\"answer for question, \" + str(self.question_id) class ExamUserRelations(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE) exam", "question in self.get_questions_instances() ] if len(questions_instances) != len(questions): raise \"must provide all questions", "questions associated with this exam\" validation = {} for question_instance, question in zip(questions_instances,", "default=False, help_text=(\"the answer is correct?.\"), ) def __str__(self): return \"answer for question, \"", "minimun points.\"), }, ) create_date = models.DateTimeField(auto_now_add=True) def __str__(self): return \"Quiz: \" +", "\"null\": (\"you must provied the minimun points.\"), }, ) create_date = models.DateTimeField(auto_now_add=True) def", "'idContent':self.content.id, 'idExam':self.id}) return reverse(\"exams:exam-take\", kwargs={\"idExam\": self.id}) def get_detail_url(self): return reverse(\"exams:exam-detail\", kwargs={\"id\": self.id}) def", "questions with their answers to evaluate\" for question in questions.values(): question[\"Answers\"] = sorted(question[\"Answers\"],", "Answer.objects.filter(question=self).order_by(\"?\") class Answer(models.Model): exam = models.ForeignKey(Exam, default=None, on_delete=models.CASCADE) question = models.ForeignKey(Question, default=None, 
on_delete=models.CASCADE)", "user_answer[\"approved\"] for question in user_answer.values(): question[\"id\"] = int(question[\"id\"]) return user_answer def time(self): return", "models.ForeignKey(Exam, default=None, on_delete=models.CASCADE) question = models.CharField( (\"question\"), max_length=600, blank=False, unique=False, help_text=(\"Required. 600 characters", "MaxValueValidator, MinValueValidator from django.utils.translation import gettext_lazy as _ from django.core.exceptions import ObjectDoesNotExist from", "question[\"Answers\"] question[\"points\"] = ( float(question_instance.question_value) if question[\"validate\"] else float(0) ) questions[\"approved\"] = float(self.approved)", "= models.CharField( (\"question\"), max_length=600, blank=False, unique=False, help_text=(\"Required. 600 characters or fewer.\"), error_messages={\"unique\": (\"A", "= kwargs.pop(\"questionlist\", None) if kwargs.pop(\"update\", None): Question.objects.filter(exam=self).delete() super(Exam, self).save(*args, **kwargs) if questionlistForms: for", "MinValueValidator from django.utils.translation import gettext_lazy as _ from django.core.exceptions import ObjectDoesNotExist from courses.models", "!= len(questions): raise \"must provide all questions associated with this exam\" validation =", "answer[1]: question[\"validate\"] = False continue del question[\"Answers\"] question[\"points\"] = ( float(question_instance.question_value) if question[\"validate\"]", "= models.IntegerField( (\"number of correct answers for this question\"), help_text=(\"number of correct answers", "correct_answer = models.BooleanField( default=False, help_text=(\"the answer is correct?.\"), ) def __str__(self): return \"answer", "questionlistForms: for questionForm in questionlistForms: question_instance = questionForm.save(commit=False) question_instance.exam = self question_instance.save( answerlist=questionForm.answersForms(),", "float(question_instance.question_value) if question[\"validate\"] 
else float(0) ) questions[\"approved\"] = float(self.approved) return questions class Question(models.Model):", "all questions associated with this exam\" validation = {} for question_instance, question in", "= timezone.now() self.user_answer = userQuestions new_points = 0 for key, question in userQuestions.items():", "models.ForeignKey(Question, default=None, on_delete=models.CASCADE) answer = models.CharField((\"answer\"), max_length=600, blank=False,) correct_answer = models.BooleanField( default=False, help_text=(\"the", "= kwargs.pop(\"answerlist\", None) familyInstance = kwargs.pop(\"familyInstance\", None) super(Question, self).save(*args, **kwargs) if answerlistForms: for", "# help_text=('point value of the question. Max 5 digits: 3 for en integer", "None) if kwargs.pop(\"update\", None): Question.objects.filter(exam=self).delete() super(Exam, self).save(*args, **kwargs) if questionlistForms: for questionForm in", "reverse(\"exams:exam-take\", kwargs={\"idExam\": self.id}) def get_detail_url(self): return reverse(\"exams:exam-detail\", kwargs={\"id\": self.id}) def get_edit_url(self): return reverse(\"exams:exam-update\",", "models import ast class Exam(models.Model): user = models.ForeignKey(User, default=None, on_delete=models.CASCADE) content = models.OneToOneField(", ") last_try = models.DateTimeField(default=None, blank=True, null=True) def __str__(self): return self.exam.name + \": \"", "django.urls import reverse from operator import itemgetter from users.models import User from django.db", "= float(self.approved) return questions class Question(models.Model): exam = models.ForeignKey(Exam, default=None, on_delete=models.CASCADE) question =", "max_length=600, blank=False, unique=False, help_text=(\"Required. 
600 characters or fewer.\"), error_messages={\"unique\": (\"A question with that", "the point value.\"), }, ) def __str__(self): return \"Question: \" + self.question def", "self.exam.name + \": \" + self.user.get_full_name() def can_take_exam(self): if self.number_of_try >= 3: if", "kwargs={'id':self.course.id, 'idModule':self.module.id, 'idContent':self.id}) # return reverse(\"exams:exam-take\", kwargs={'id':self.content.belongs_to_the_course().id, 'idModule':self.content.belongs_to_the_module().id, 'idContent':self.content.id, 'idExam':self.id}) return reverse(\"exams:exam-take\", kwargs={\"idExam\":", "get_evaluated_url(self): return reverse(\"exams:exam-evaluated\", kwargs={\"idExam\": self.id}) def evaluate(self, questions): if not questions: raise \"they", "1 for index in range(questions.count())] return zip(numbers_of_questions, questions) def get_take_url(self): # return reverse(\"courses:course-home-week-content\",", "integer part and 2 for decimal part\" ), error_messages={ \"blank\": (\"you must provied", "import gettext_lazy as _ from django.core.exceptions import ObjectDoesNotExist from courses.models import Content from", "max_length=150, unique=False, blank=False, help_text=(\"Required. 
150 characters or fewer.\"), ) description = models.TextField() approved", "Question.objects.filter(exam=self).delete() super(Exam, self).save(*args, **kwargs) if questionlistForms: for questionForm in questionlistForms: question_instance = questionForm.save(commit=False)", "\"Question: \" + self.question def save(self, *args, **kwargs): answerlistForms = kwargs.pop(\"answerlist\", None) familyInstance", "user_answer = models.TextField(default=None, blank=True, null=True) points = models.DecimalField( default=0, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)],", "question\"), help_text=(\"number of correct answers for this question\"), default=0, ) question_value = models.DecimalField(", "Question.objects.filter(exam=self) numbers_of_questions = [index + 1 for index in range(questions.count())] return zip(numbers_of_questions, questions)", "= self answer_instance.save() def get_answers_instances(self): return Answer.objects.filter(question=self) def get_altAnswers_instances(self): return Answer.objects.filter(question=self).order_by(\"?\") class Answer(models.Model):", "ObjectDoesNotExist from courses.models import Content from django.utils import timezone from django.urls import reverse", "questions class Question(models.Model): exam = models.ForeignKey(Exam, default=None, on_delete=models.CASCADE) question = models.CharField( (\"question\"), max_length=600,", "return Answer.objects.filter(question=self) def get_altAnswers_instances(self): return Answer.objects.filter(question=self).order_by(\"?\") class Answer(models.Model): exam = models.ForeignKey(Exam, default=None, on_delete=models.CASCADE)", "blank=False, null=False, help_text=( \"Put here the minimum points necessary to pass this exam.", "= {} for question_instance, question in zip(questions_instances, questions.values()): validation[\"Questionid\"] = question_instance.id answers_instances =", "for question in questions.values(): question[\"Answers\"] = 
sorted(question[\"Answers\"], key=itemgetter(0)) questions_instances = [ question for", "reverse(\"exams:exam-delete\", kwargs={\"id\": self.id}) def get_evaluated_url(self): return reverse(\"exams:exam-evaluated\", kwargs={\"idExam\": self.id}) def evaluate(self, questions): if", "minimum points necessary to pass this exam. Max 5 digits: 3 for en", "None) familyInstance = kwargs.pop(\"familyInstance\", None) super(Question, self).save(*args, **kwargs) if answerlistForms: for answerForm in", "\": \" + self.user.get_full_name() def can_take_exam(self): if self.number_of_try >= 3: if self.time().days >", "and %s minut(s)\" % ( time.seconds // 3600, (time.seconds // 60) % 60,", "= models.DecimalField( default=0, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], ) number_of_try = models.IntegerField( default=0, validators=[MinValueValidator(0),", "on_delete=models.CASCADE) question = models.CharField( (\"question\"), max_length=600, blank=False, unique=False, help_text=(\"Required. 
600 characters or fewer.\"),", "question for _, question in self.get_questions_instances() ] if len(questions_instances) != len(questions): raise \"must", "provide all questions associated with this exam\" validation = {} for question_instance, question", "True for answer_instance, answer in zip(answers_instances, question[\"Answers\"]): if answer_instance.correct_answer != answer[1]: question[\"validate\"] =", "validation[\"Questionid\"] = question_instance.id answers_instances = question_instance.get_answers_instances() if len(answers_instances) != len(question[\"Answers\"]): raise \"must provide", "float(self.approved) return questions class Question(models.Model): exam = models.ForeignKey(Exam, default=None, on_delete=models.CASCADE) question = models.CharField(", "error_messages={\"unique\": (\"A question with that name already exists.\"),}, ) correct_answers = models.IntegerField( (\"number", "if self.time().days > 0 or self.time().seconds > (8 * 60 * 60): self.number_of_try", "(\"Exam name\"), max_length=150, unique=False, blank=False, help_text=(\"Required. 
150 characters or fewer.\"), ) description =", "or fewer.\"), ) description = models.TextField() approved = models.DecimalField( (\"minimun points to approved\"),", "Content from django.utils import timezone from django.urls import reverse from operator import itemgetter", "time.seconds // 3600, (time.seconds // 60) % 60, ) def try_exam(self, userQuestions): self.number_of_try", "= models.ForeignKey(User, on_delete=models.CASCADE) exam = models.ForeignKey(Exam, on_delete=models.CASCADE) user_answer = models.TextField(default=None, blank=True, null=True) points", "time(self): return timezone.now() - self.last_try def time_until_take(self): time = ( timezone.timedelta(days=0, seconds=8 *", "get_delete_url(self): return reverse(\"exams:exam-delete\", kwargs={\"id\": self.id}) def get_evaluated_url(self): return reverse(\"exams:exam-evaluated\", kwargs={\"idExam\": self.id}) def evaluate(self,", "answer_instance.correct_answer != answer[1]: question[\"validate\"] = False continue del question[\"Answers\"] question[\"points\"] = ( float(question_instance.question_value)", "max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], help_text=('Max 5 digits: same as \"minimun points to approved\"'),", "question in zip(questions_instances, questions.values()): validation[\"Questionid\"] = question_instance.id answers_instances = question_instance.get_answers_instances() if len(answers_instances) !=", "question[\"Answers\"]): if answer_instance.correct_answer != answer[1]: question[\"validate\"] = False continue del question[\"Answers\"] question[\"points\"] =", "def get_questions_instances(self): questions = Question.objects.filter(exam=self) numbers_of_questions = [index + 1 for index in", "600 characters or fewer.\"), error_messages={\"unique\": (\"A question with that name already exists.\"),}, )", "self.user.get_full_name() def can_take_exam(self): if self.number_of_try >= 3: if self.time().days > 0 or self.time().seconds", 
"kwargs={\"id\": self.id}) def get_evaluated_url(self): return reverse(\"exams:exam-evaluated\", kwargs={\"idExam\": self.id}) def evaluate(self, questions): if not", "+ 1 for index in range(questions.count())] return zip(numbers_of_questions, questions) def get_take_url(self): # return", "name already exists.\"),}, ) correct_answers = models.IntegerField( (\"number of correct answers for this", "return reverse(\"courses:course-home-week-content\", kwargs={'id':self.course.id, 'idModule':self.module.id, 'idContent':self.id}) # return reverse(\"exams:exam-take\", kwargs={'id':self.content.belongs_to_the_course().id, 'idModule':self.content.belongs_to_the_module().id, 'idContent':self.content.id, 'idExam':self.id}) return", "if answer_instance.correct_answer != answer[1]: question[\"validate\"] = False continue del question[\"Answers\"] question[\"points\"] = (", "necessary to pass this exam. Max 5 digits: 3 for en integer part", "import reverse from operator import itemgetter from users.models import User from django.db import", "self}, ) def get_owner(self): return self.user.get_full_name() def get_questions_instances(self): questions = Question.objects.filter(exam=self) numbers_of_questions =", "answers for this question\"), default=0, ) question_value = models.DecimalField( (\"points\"), default=None, max_digits=5, decimal_places=2,", "questionlistForms: question_instance = questionForm.save(commit=False) question_instance.exam = self question_instance.save( answerlist=questionForm.answersForms(), familyInstance={\"Exam\": self}, ) def", "import ast class Exam(models.Model): user = models.ForeignKey(User, default=None, on_delete=models.CASCADE) content = models.OneToOneField( Content,", "part\" ), error_messages={ \"blank\": (\"you must provied the minimun points.\"), \"null\": (\"you must", "question[\"validate\"] = False continue del question[\"Answers\"] question[\"points\"] = ( float(question_instance.question_value) if question[\"validate\"] else", "True 
return False return True def get_user_answer(self): user_answer = ast.literal_eval(self.user_answer) del user_answer[\"approved\"] for", "from django.core.validators import MaxValueValidator, MinValueValidator from django.utils.translation import gettext_lazy as _ from django.core.exceptions", "last_try = models.DateTimeField(default=None, blank=True, null=True) def __str__(self): return self.exam.name + \": \" +", ") def __str__(self): return \"Question: \" + self.question def save(self, *args, **kwargs): answerlistForms", "True def get_user_answer(self): user_answer = ast.literal_eval(self.user_answer) del user_answer[\"approved\"] for question in user_answer.values(): question[\"id\"]", "\" + str(self.question_id) class ExamUserRelations(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE) exam = models.ForeignKey(Exam, on_delete=models.CASCADE)", "60, microseconds=0) - self.time() ) return \"%s hour(s) and %s minut(s)\" % (", "self.id}) def get_detail_url(self): return reverse(\"exams:exam-detail\", kwargs={\"id\": self.id}) def get_edit_url(self): return reverse(\"exams:exam-update\", kwargs={\"id\": self.id})", "must provied the minimun points.\"), \"null\": (\"you must provied the minimun points.\"), },", "this question\"), default=0, ) question_value = models.DecimalField( (\"points\"), default=None, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)],", "__str__(self): return \"answer for question, \" + str(self.question_id) class ExamUserRelations(models.Model): user = models.ForeignKey(User,", "default=None, on_delete=models.CASCADE) answer = models.CharField((\"answer\"), max_length=600, blank=False,) correct_answer = models.BooleanField( default=False, help_text=(\"the answer", "def save(self, *args, **kwargs): questionlistForms = kwargs.pop(\"questionlist\", None) if kwargs.pop(\"update\", None): Question.objects.filter(exam=self).delete() super(Exam,", "default=None, max_digits=5, 
decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], blank=False, null=False, help_text=( \"Put here the minimum points", "get_take_url(self): # return reverse(\"courses:course-home-week-content\", kwargs={'id':self.course.id, 'idModule':self.module.id, 'idContent':self.id}) # return reverse(\"exams:exam-take\", kwargs={'id':self.content.belongs_to_the_course().id, 'idModule':self.content.belongs_to_the_module().id, 'idContent':self.content.id,", "of the question. Max 5 digits: 3 for en integer part and 2", "time = ( timezone.timedelta(days=0, seconds=8 * 60 * 60, microseconds=0) - self.time() )", "\"Quiz: \" + self.name def save(self, *args, **kwargs): questionlistForms = kwargs.pop(\"questionlist\", None) if", "__str__(self): return \"Question: \" + self.question def save(self, *args, **kwargs): answerlistForms = kwargs.pop(\"answerlist\",", "def can_take_exam(self): if self.number_of_try >= 3: if self.time().days > 0 or self.time().seconds >", "answer_instance.exam = familyInstance[\"Exam\"] answer_instance.question = self answer_instance.save() def get_answers_instances(self): return Answer.objects.filter(question=self) def get_altAnswers_instances(self):", "from django.utils import timezone from django.urls import reverse from operator import itemgetter from", "models.IntegerField( (\"number of correct answers for this question\"), help_text=(\"number of correct answers for", "and 2 for decimal part'), blank=False, null=False, error_messages={ \"blank\": (\"you must provied the", "def get_altAnswers_instances(self): return Answer.objects.filter(question=self).order_by(\"?\") class Answer(models.Model): exam = models.ForeignKey(Exam, default=None, on_delete=models.CASCADE) question =", "number_of_try = models.IntegerField( default=0, validators=[MinValueValidator(0), MaxValueValidator(3)] ) last_try = models.DateTimeField(default=None, blank=True, null=True) def", "self.save() return True return False return True def 
get_user_answer(self): user_answer = ast.literal_eval(self.user_answer) del", "60, ) def try_exam(self, userQuestions): self.number_of_try += 1 self.last_try = timezone.now() self.user_answer =", "= self question_instance.save( answerlist=questionForm.answersForms(), familyInstance={\"Exam\": self}, ) def get_owner(self): return self.user.get_full_name() def get_questions_instances(self):", "reverse from operator import itemgetter from users.models import User from django.db import models", "question_instance.exam = self question_instance.save( answerlist=questionForm.answersForms(), familyInstance={\"Exam\": self}, ) def get_owner(self): return self.user.get_full_name() def", "default=0, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], ) number_of_try = models.IntegerField( default=0, validators=[MinValueValidator(0), MaxValueValidator(3)] )", "timezone.timedelta(days=0, seconds=8 * 60 * 60, microseconds=0) - self.time() ) return \"%s hour(s)", "= models.TextField() approved = models.DecimalField( (\"minimun points to approved\"), default=None, max_digits=5, decimal_places=2, validators=[MinValueValidator(0),", "minut(s)\" % ( time.seconds // 3600, (time.seconds // 60) % 60, ) def", "{} for question_instance, question in zip(questions_instances, questions.values()): validation[\"Questionid\"] = question_instance.id answers_instances = question_instance.get_answers_instances()", "help_text=(\"number of correct answers for this question\"), default=0, ) question_value = models.DecimalField( (\"points\"),", "en integer part and 2 for decimal part\" ), error_messages={ \"blank\": (\"you must", "microseconds=0) - self.time() ) return \"%s hour(s) and %s minut(s)\" % ( time.seconds", "kwargs={\"idExam\": self.id}) def get_detail_url(self): return reverse(\"exams:exam-detail\", kwargs={\"id\": self.id}) def get_edit_url(self): return reverse(\"exams:exam-update\", kwargs={\"id\":", "django.core.exceptions import 
ObjectDoesNotExist from courses.models import Content from django.utils import timezone from django.urls", "float(0) ) questions[\"approved\"] = float(self.approved) return questions class Question(models.Model): exam = models.ForeignKey(Exam, default=None,", "return questions class Question(models.Model): exam = models.ForeignKey(Exam, default=None, on_delete=models.CASCADE) question = models.CharField( (\"question\"),", "blank=True, null=True, default=None, on_delete=models.SET_DEFAULT ) name = models.CharField( (\"Exam name\"), max_length=150, unique=False, blank=False,", "user_answer = ast.literal_eval(self.user_answer) del user_answer[\"approved\"] for question in user_answer.values(): question[\"id\"] = int(question[\"id\"]) return", "* 60 * 60, microseconds=0) - self.time() ) return \"%s hour(s) and %s", "__str__(self): return \"Quiz: \" + self.name def save(self, *args, **kwargs): questionlistForms = kwargs.pop(\"questionlist\",", "help_text=('Max 5 digits: same as \"minimun points to approved\"'), # help_text=('point value of", "user_answer def time(self): return timezone.now() - self.last_try def time_until_take(self): time = ( timezone.timedelta(days=0,", "2 for decimal part\" ), error_messages={ \"blank\": (\"you must provied the minimun points.\"),", "django.core.validators import MaxValueValidator, MinValueValidator from django.utils.translation import gettext_lazy as _ from django.core.exceptions import", "import itemgetter from users.models import User from django.db import models import ast class", "( float(question_instance.question_value) if question[\"validate\"] else float(0) ) questions[\"approved\"] = float(self.approved) return questions class", "en integer part and 2 for decimal part'), blank=False, null=False, error_messages={ \"blank\": (\"you", "question with that name already exists.\"),}, ) correct_answers = models.IntegerField( (\"number of correct", "points to approved\"), default=None, max_digits=5, decimal_places=2, 
validators=[MinValueValidator(0), MaxValueValidator(100)], blank=False, null=False, help_text=( \"Put here", "this exam\" question[\"validate\"] = True for answer_instance, answer in zip(answers_instances, question[\"Answers\"]): if answer_instance.correct_answer", "\" + self.user.get_full_name() def can_take_exam(self): if self.number_of_try >= 3: if self.time().days > 0", "continue del question[\"Answers\"] question[\"points\"] = ( float(question_instance.question_value) if question[\"validate\"] else float(0) ) questions[\"approved\"]", "null=False, error_messages={ \"blank\": (\"you must provied the point value.\"), \"null\": (\"you must provied", "0 self.save() return True return False return True def get_user_answer(self): user_answer = ast.literal_eval(self.user_answer)", "points.\"), \"null\": (\"you must provied the minimun points.\"), }, ) create_date = models.DateTimeField(auto_now_add=True)", "2 for decimal part'), blank=False, null=False, error_messages={ \"blank\": (\"you must provied the point", "decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], ) number_of_try = models.IntegerField( default=0, validators=[MinValueValidator(0), MaxValueValidator(3)] ) last_try =", "models.OneToOneField( Content, blank=True, null=True, default=None, on_delete=models.SET_DEFAULT ) name = models.CharField( (\"Exam name\"), max_length=150,", "question[\"validate\"] else float(0) ) questions[\"approved\"] = float(self.approved) return questions class Question(models.Model): exam =", "0 or self.time().seconds > (8 * 60 * 60): self.number_of_try = 0 self.save()", "150 characters or fewer.\"), ) description = models.TextField() approved = models.DecimalField( (\"minimun points", "\"blank\": (\"you must provied the point value.\"), \"null\": (\"you must provied the point", "of correct answers for this question\"), default=0, ) question_value = models.DecimalField( (\"points\"), default=None,", "already exists.\"),}, ) correct_answers = 
models.IntegerField( (\"number of correct answers for this question\"),", "and 2 for decimal part\" ), error_messages={ \"blank\": (\"you must provied the minimun", "self.user_answer = userQuestions new_points = 0 for key, question in userQuestions.items(): if key", "name\"), max_length=150, unique=False, blank=False, help_text=(\"Required. 150 characters or fewer.\"), ) description = models.TextField()", "provide the questions with their answers to evaluate\" for question in questions.values(): question[\"Answers\"]", "points necessary to pass this exam. Max 5 digits: 3 for en integer", "return Answer.objects.filter(question=self).order_by(\"?\") class Answer(models.Model): exam = models.ForeignKey(Exam, default=None, on_delete=models.CASCADE) question = models.ForeignKey(Question, default=None,", "import Content from django.utils import timezone from django.urls import reverse from operator import", "self.last_try def time_until_take(self): time = ( timezone.timedelta(days=0, seconds=8 * 60 * 60, microseconds=0)", "return True def get_user_answer(self): user_answer = ast.literal_eval(self.user_answer) del user_answer[\"approved\"] for question in user_answer.values():", "help_text=( \"Put here the minimum points necessary to pass this exam. 
Max 5", "decimal part'), blank=False, null=False, error_messages={ \"blank\": (\"you must provied the point value.\"), \"null\":", "(\"you must provied the minimun points.\"), \"null\": (\"you must provied the minimun points.\"),", "def time_until_take(self): time = ( timezone.timedelta(days=0, seconds=8 * 60 * 60, microseconds=0) -", "60) % 60, ) def try_exam(self, userQuestions): self.number_of_try += 1 self.last_try = timezone.now()", "or fewer.\"), error_messages={\"unique\": (\"A question with that name already exists.\"),}, ) correct_answers =", "(\"A question with that name already exists.\"),}, ) correct_answers = models.IntegerField( (\"number of", "3 for en integer part and 2 for decimal part\" ), error_messages={ \"blank\":", "kwargs={\"id\": self.id}) def get_delete_url(self): return reverse(\"exams:exam-delete\", kwargs={\"id\": self.id}) def get_evaluated_url(self): return reverse(\"exams:exam-evaluated\", kwargs={\"idExam\":", "correct answers for this question\"), default=0, ) question_value = models.DecimalField( (\"points\"), default=None, max_digits=5,", ") name = models.CharField( (\"Exam name\"), max_length=150, unique=False, blank=False, help_text=(\"Required. 
150 characters or", "if len(questions_instances) != len(questions): raise \"must provide all questions associated with this exam\"", "= userQuestions new_points = 0 for key, question in userQuestions.items(): if key !=", "+ self.user.get_full_name() def can_take_exam(self): if self.number_of_try >= 3: if self.time().days > 0 or", "all answers associated with this exam\" question[\"validate\"] = True for answer_instance, answer in", "get_questions_instances(self): questions = Question.objects.filter(exam=self) numbers_of_questions = [index + 1 for index in range(questions.count())]", "= questionForm.save(commit=False) question_instance.exam = self question_instance.save( answerlist=questionForm.answersForms(), familyInstance={\"Exam\": self}, ) def get_owner(self): return", "for questionForm in questionlistForms: question_instance = questionForm.save(commit=False) question_instance.exam = self question_instance.save( answerlist=questionForm.answersForms(), familyInstance={\"Exam\":", "for decimal part\" ), error_messages={ \"blank\": (\"you must provied the minimun points.\"), \"null\":", "+ str(self.question_id) class ExamUserRelations(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE) exam = models.ForeignKey(Exam, on_delete=models.CASCADE) user_answer", "get_answers_instances(self): return Answer.objects.filter(question=self) def get_altAnswers_instances(self): return Answer.objects.filter(question=self).order_by(\"?\") class Answer(models.Model): exam = models.ForeignKey(Exam, default=None,", "// 60) % 60, ) def try_exam(self, userQuestions): self.number_of_try += 1 self.last_try =", "null=False, help_text=( \"Put here the minimum points necessary to pass this exam. 
Max", "zip(questions_instances, questions.values()): validation[\"Questionid\"] = question_instance.id answers_instances = question_instance.get_answers_instances() if len(answers_instances) != len(question[\"Answers\"]): raise", "= models.ForeignKey(Exam, default=None, on_delete=models.CASCADE) question = models.CharField( (\"question\"), max_length=600, blank=False, unique=False, help_text=(\"Required. 600", "for index in range(questions.count())] return zip(numbers_of_questions, questions) def get_take_url(self): # return reverse(\"courses:course-home-week-content\", kwargs={'id':self.course.id,", "(\"number of correct answers for this question\"), help_text=(\"number of correct answers for this", "+ \": \" + self.user.get_full_name() def can_take_exam(self): if self.number_of_try >= 3: if self.time().days", "digits: 3 for en integer part and 2 for decimal part\" ), error_messages={", "kwargs.pop(\"update\", None): Question.objects.filter(exam=self).delete() super(Exam, self).save(*args, **kwargs) if questionlistForms: for questionForm in questionlistForms: question_instance", "!= answer[1]: question[\"validate\"] = False continue del question[\"Answers\"] question[\"points\"] = ( float(question_instance.question_value) if", "provied the point value.\"), }, ) def __str__(self): return \"Question: \" + self.question", "# return reverse(\"exams:exam-take\", kwargs={'id':self.content.belongs_to_the_course().id, 'idModule':self.content.belongs_to_the_module().id, 'idContent':self.content.id, 'idExam':self.id}) return reverse(\"exams:exam-take\", kwargs={\"idExam\": self.id}) def get_detail_url(self):", "question in questions.values(): question[\"Answers\"] = sorted(question[\"Answers\"], key=itemgetter(0)) questions_instances = [ question for _,", "for question, \" + str(self.question_id) class ExamUserRelations(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE) exam =", "on_delete=models.CASCADE) content = models.OneToOneField( Content, 
blank=True, null=True, default=None, on_delete=models.SET_DEFAULT ) name = models.CharField(", "time_until_take(self): time = ( timezone.timedelta(days=0, seconds=8 * 60 * 60, microseconds=0) - self.time()", "validation = {} for question_instance, question in zip(questions_instances, questions.values()): validation[\"Questionid\"] = question_instance.id answers_instances", "+ self.name def save(self, *args, **kwargs): questionlistForms = kwargs.pop(\"questionlist\", None) if kwargs.pop(\"update\", None):", "}, ) def __str__(self): return \"Question: \" + self.question def save(self, *args, **kwargs):", "User from django.db import models import ast class Exam(models.Model): user = models.ForeignKey(User, default=None,", "__str__(self): return self.exam.name + \": \" + self.user.get_full_name() def can_take_exam(self): if self.number_of_try >=", "class ExamUserRelations(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE) exam = models.ForeignKey(Exam, on_delete=models.CASCADE) user_answer = models.TextField(default=None,", "content = models.OneToOneField( Content, blank=True, null=True, default=None, on_delete=models.SET_DEFAULT ) name = models.CharField( (\"Exam", "blank=False, null=False, error_messages={ \"blank\": (\"you must provied the point value.\"), \"null\": (\"you must", "must provied the minimun points.\"), }, ) create_date = models.DateTimeField(auto_now_add=True) def __str__(self): return", "None) super(Question, self).save(*args, **kwargs) if answerlistForms: for answerForm in answerlistForms: answer_instance = answerForm.save(commit=False)", "0 for key, question in userQuestions.items(): if key != \"approved\": new_points += question[\"points\"]", "point value.\"), }, ) def __str__(self): return \"Question: \" + self.question def save(self,", "question_instance = questionForm.save(commit=False) question_instance.exam = self question_instance.save( answerlist=questionForm.answersForms(), familyInstance={\"Exam\": self}, ) def 
get_owner(self):", "3: if self.time().days > 0 or self.time().seconds > (8 * 60 * 60):", "(\"you must provied the point value.\"), \"null\": (\"you must provied the point value.\"),", "help_text=('point value of the question. Max 5 digits: 3 for en integer part", "def __str__(self): return \"Question: \" + self.question def save(self, *args, **kwargs): answerlistForms =", "Max 5 digits: 3 for en integer part and 2 for decimal part\"", "return False return True def get_user_answer(self): user_answer = ast.literal_eval(self.user_answer) del user_answer[\"approved\"] for question", "questionlistForms = kwargs.pop(\"questionlist\", None) if kwargs.pop(\"update\", None): Question.objects.filter(exam=self).delete() super(Exam, self).save(*args, **kwargs) if questionlistForms:", "- self.last_try def time_until_take(self): time = ( timezone.timedelta(days=0, seconds=8 * 60 * 60,", "MaxValueValidator(100)], ) number_of_try = models.IntegerField( default=0, validators=[MinValueValidator(0), MaxValueValidator(3)] ) last_try = models.DateTimeField(default=None, blank=True,", "MaxValueValidator(100)], blank=False, null=False, help_text=( \"Put here the minimum points necessary to pass this", "correct?.\"), ) def __str__(self): return \"answer for question, \" + str(self.question_id) class ExamUserRelations(models.Model):", "get_owner(self): return self.user.get_full_name() def get_questions_instances(self): questions = Question.objects.filter(exam=self) numbers_of_questions = [index + 1", "3 for en integer part and 2 for decimal part'), blank=False, null=False, error_messages={", "models.DateTimeField(default=None, blank=True, null=True) def __str__(self): return self.exam.name + \": \" + self.user.get_full_name() def", "on_delete=models.SET_DEFAULT ) name = models.CharField( (\"Exam name\"), max_length=150, unique=False, blank=False, help_text=(\"Required. 
150 characters", "= False continue del question[\"Answers\"] question[\"points\"] = ( float(question_instance.question_value) if question[\"validate\"] else float(0)", "of correct answers for this question\"), help_text=(\"number of correct answers for this question\"),", "Content, blank=True, null=True, default=None, on_delete=models.SET_DEFAULT ) name = models.CharField( (\"Exam name\"), max_length=150, unique=False,", "else float(0) ) questions[\"approved\"] = float(self.approved) return questions class Question(models.Model): exam = models.ForeignKey(Exam,", "(8 * 60 * 60): self.number_of_try = 0 self.save() return True return False", "self question_instance.save( answerlist=questionForm.answersForms(), familyInstance={\"Exam\": self}, ) def get_owner(self): return self.user.get_full_name() def get_questions_instances(self): questions", "answer is correct?.\"), ) def __str__(self): return \"answer for question, \" + str(self.question_id)", "for question in user_answer.values(): question[\"id\"] = int(question[\"id\"]) return user_answer def time(self): return timezone.now()", "from django.core.exceptions import ObjectDoesNotExist from courses.models import Content from django.utils import timezone from", "error_messages={ \"blank\": (\"you must provied the minimun points.\"), \"null\": (\"you must provied the", "max_length=600, blank=False,) correct_answer = models.BooleanField( default=False, help_text=(\"the answer is correct?.\"), ) def __str__(self):", "for key, question in userQuestions.items(): if key != \"approved\": new_points += question[\"points\"] if", ") def get_owner(self): return self.user.get_full_name() def get_questions_instances(self): questions = Question.objects.filter(exam=self) numbers_of_questions = [index", "is correct?.\"), ) def __str__(self): return \"answer for question, \" + str(self.question_id) class", "questions = Question.objects.filter(exam=self) numbers_of_questions = [index + 1 for index in range(questions.count())] return", 
"5 digits: 3 for en integer part and 2 for decimal part\" ),", "with that name already exists.\"),}, ) correct_answers = models.IntegerField( (\"number of correct answers", "on_delete=models.CASCADE) exam = models.ForeignKey(Exam, on_delete=models.CASCADE) user_answer = models.TextField(default=None, blank=True, null=True) points = models.DecimalField(", "'idExam':self.id}) return reverse(\"exams:exam-take\", kwargs={\"idExam\": self.id}) def get_detail_url(self): return reverse(\"exams:exam-detail\", kwargs={\"id\": self.id}) def get_edit_url(self):", "if questionlistForms: for questionForm in questionlistForms: question_instance = questionForm.save(commit=False) question_instance.exam = self question_instance.save(", "class Question(models.Model): exam = models.ForeignKey(Exam, default=None, on_delete=models.CASCADE) question = models.CharField( (\"question\"), max_length=600, blank=False,", "import models import ast class Exam(models.Model): user = models.ForeignKey(User, default=None, on_delete=models.CASCADE) content =", "django.utils.translation import gettext_lazy as _ from django.core.exceptions import ObjectDoesNotExist from courses.models import Content", "return reverse(\"exams:exam-detail\", kwargs={\"id\": self.id}) def get_edit_url(self): return reverse(\"exams:exam-update\", kwargs={\"id\": self.id}) def get_delete_url(self): return", "same as \"minimun points to approved\"'), # help_text=('point value of the question. 
Max", "answerForm in answerlistForms: answer_instance = answerForm.save(commit=False) answer_instance.exam = familyInstance[\"Exam\"] answer_instance.question = self answer_instance.save()", "= models.ForeignKey(Question, default=None, on_delete=models.CASCADE) answer = models.CharField((\"answer\"), max_length=600, blank=False,) correct_answer = models.BooleanField( default=False,", "return timezone.now() - self.last_try def time_until_take(self): time = ( timezone.timedelta(days=0, seconds=8 * 60", "del user_answer[\"approved\"] for question in user_answer.values(): question[\"id\"] = int(question[\"id\"]) return user_answer def time(self):", "max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], blank=False, null=False, help_text=( \"Put here the minimum points necessary", "django.db import models import ast class Exam(models.Model): user = models.ForeignKey(User, default=None, on_delete=models.CASCADE) content", "hour(s) and %s minut(s)\" % ( time.seconds // 3600, (time.seconds // 60) %", "self.name def save(self, *args, **kwargs): questionlistForms = kwargs.pop(\"questionlist\", None) if kwargs.pop(\"update\", None): Question.objects.filter(exam=self).delete()", "// 3600, (time.seconds // 60) % 60, ) def try_exam(self, userQuestions): self.number_of_try +=", "1 self.last_try = timezone.now() self.user_answer = userQuestions new_points = 0 for key, question", "this exam. 
Max 5 digits: 3 for en integer part and 2 for", "= Question.objects.filter(exam=self) numbers_of_questions = [index + 1 for index in range(questions.count())] return zip(numbers_of_questions,", "provied the point value.\"), \"null\": (\"you must provied the point value.\"), }, )", "fewer.\"), error_messages={\"unique\": (\"A question with that name already exists.\"),}, ) correct_answers = models.IntegerField(", "question[\"validate\"] = True for answer_instance, answer in zip(answers_instances, question[\"Answers\"]): if answer_instance.correct_answer != answer[1]:", "if answerlistForms: for answerForm in answerlistForms: answer_instance = answerForm.save(commit=False) answer_instance.exam = familyInstance[\"Exam\"] answer_instance.question", "= question_instance.get_answers_instances() if len(answers_instances) != len(question[\"Answers\"]): raise \"must provide all answers associated with", "value.\"), \"null\": (\"you must provied the point value.\"), }, ) def __str__(self): return", "% 60, ) def try_exam(self, userQuestions): self.number_of_try += 1 self.last_try = timezone.now() self.user_answer", "\"blank\": (\"you must provied the minimun points.\"), \"null\": (\"you must provied the minimun", "characters or fewer.\"), error_messages={\"unique\": (\"A question with that name already exists.\"),}, ) correct_answers", "evaluate\" for question in questions.values(): question[\"Answers\"] = sorted(question[\"Answers\"], key=itemgetter(0)) questions_instances = [ question", "def try_exam(self, userQuestions): self.number_of_try += 1 self.last_try = timezone.now() self.user_answer = userQuestions new_points", "questions[\"approved\"] = float(self.approved) return questions class Question(models.Model): exam = models.ForeignKey(Exam, default=None, on_delete=models.CASCADE) question", "with this exam\" question[\"validate\"] = True for answer_instance, answer in zip(answers_instances, question[\"Answers\"]): if", "def __str__(self): return self.exam.name + \": \" + 
self.user.get_full_name() def can_take_exam(self): if self.number_of_try", "= True for answer_instance, answer in zip(answers_instances, question[\"Answers\"]): if answer_instance.correct_answer != answer[1]: question[\"validate\"]", "answers for this question\"), help_text=(\"number of correct answers for this question\"), default=0, )", "approved = models.DecimalField( (\"minimun points to approved\"), default=None, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], blank=False,", "models.DecimalField( default=0, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], ) number_of_try = models.IntegerField( default=0, validators=[MinValueValidator(0), MaxValueValidator(3)]", "answer_instance, answer in zip(answers_instances, question[\"Answers\"]): if answer_instance.correct_answer != answer[1]: question[\"validate\"] = False continue", "= models.IntegerField( default=0, validators=[MinValueValidator(0), MaxValueValidator(3)] ) last_try = models.DateTimeField(default=None, blank=True, null=True) def __str__(self):", "default=0, ) question_value = models.DecimalField( (\"points\"), default=None, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], help_text=('Max 5", "in zip(answers_instances, question[\"Answers\"]): if answer_instance.correct_answer != answer[1]: question[\"validate\"] = False continue del question[\"Answers\"]", "answerlist=questionForm.answersForms(), familyInstance={\"Exam\": self}, ) def get_owner(self): return self.user.get_full_name() def get_questions_instances(self): questions = Question.objects.filter(exam=self)", "exam = models.ForeignKey(Exam, default=None, on_delete=models.CASCADE) question = models.CharField( (\"question\"), max_length=600, blank=False, unique=False, help_text=(\"Required.", "must provied the point value.\"), }, ) def __str__(self): return \"Question: \" +", "questions): if not questions: raise \"they must 
provide the questions with their answers", "the point value.\"), \"null\": (\"you must provied the point value.\"), }, ) def", "[ question for _, question in self.get_questions_instances() ] if len(questions_instances) != len(questions): raise", "answerlistForms = kwargs.pop(\"answerlist\", None) familyInstance = kwargs.pop(\"familyInstance\", None) super(Question, self).save(*args, **kwargs) if answerlistForms:", "familyInstance={\"Exam\": self}, ) def get_owner(self): return self.user.get_full_name() def get_questions_instances(self): questions = Question.objects.filter(exam=self) numbers_of_questions", "correct answers for this question\"), help_text=(\"number of correct answers for this question\"), default=0,", "ExamUserRelations(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE) exam = models.ForeignKey(Exam, on_delete=models.CASCADE) user_answer = models.TextField(default=None, blank=True,", "blank=False, help_text=(\"Required. 150 characters or fewer.\"), ) description = models.TextField() approved = models.DecimalField(", "models.IntegerField( default=0, validators=[MinValueValidator(0), MaxValueValidator(3)] ) last_try = models.DateTimeField(default=None, blank=True, null=True) def __str__(self): return", "part and 2 for decimal part\" ), error_messages={ \"blank\": (\"you must provied the", "in zip(questions_instances, questions.values()): validation[\"Questionid\"] = question_instance.id answers_instances = question_instance.get_answers_instances() if len(answers_instances) != len(question[\"Answers\"]):", "* 60): self.number_of_try = 0 self.save() return True return False return True def", "models.ForeignKey(User, default=None, on_delete=models.CASCADE) content = models.OneToOneField( Content, blank=True, null=True, default=None, on_delete=models.SET_DEFAULT ) name", "ast.literal_eval(self.user_answer) del user_answer[\"approved\"] for question in user_answer.values(): question[\"id\"] = int(question[\"id\"]) return user_answer 
def", "null=True) def __str__(self): return self.exam.name + \": \" + self.user.get_full_name() def can_take_exam(self): if", "**kwargs) if answerlistForms: for answerForm in answerlistForms: answer_instance = answerForm.save(commit=False) answer_instance.exam = familyInstance[\"Exam\"]", "save(self, *args, **kwargs): questionlistForms = kwargs.pop(\"questionlist\", None) if kwargs.pop(\"update\", None): Question.objects.filter(exam=self).delete() super(Exam, self).save(*args,", "models.BooleanField( default=False, help_text=(\"the answer is correct?.\"), ) def __str__(self): return \"answer for question,", "on_delete=models.CASCADE) user_answer = models.TextField(default=None, blank=True, null=True) points = models.DecimalField( default=0, max_digits=5, decimal_places=2, validators=[MinValueValidator(0),", "correct_answers = models.IntegerField( (\"number of correct answers for this question\"), help_text=(\"number of correct", "answers to evaluate\" for question in questions.values(): question[\"Answers\"] = sorted(question[\"Answers\"], key=itemgetter(0)) questions_instances =", "return \"Question: \" + self.question def save(self, *args, **kwargs): answerlistForms = kwargs.pop(\"answerlist\", None)", "to evaluate\" for question in questions.values(): question[\"Answers\"] = sorted(question[\"Answers\"], key=itemgetter(0)) questions_instances = [", "Exam(models.Model): user = models.ForeignKey(User, default=None, on_delete=models.CASCADE) content = models.OneToOneField( Content, blank=True, null=True, default=None,", "part'), blank=False, null=False, error_messages={ \"blank\": (\"you must provied the point value.\"), \"null\": (\"you", "return \"answer for question, \" + str(self.question_id) class ExamUserRelations(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE)", "models.CharField((\"answer\"), max_length=600, blank=False,) correct_answer = models.BooleanField( default=False, help_text=(\"the answer is correct?.\"), ) def", "!= 
len(question[\"Answers\"]): raise \"must provide all answers associated with this exam\" question[\"validate\"] =", "part and 2 for decimal part'), blank=False, null=False, error_messages={ \"blank\": (\"you must provied", "question. Max 5 digits: 3 for en integer part and 2 for decimal", "kwargs.pop(\"questionlist\", None) if kwargs.pop(\"update\", None): Question.objects.filter(exam=self).delete() super(Exam, self).save(*args, **kwargs) if questionlistForms: for questionForm", "( timezone.timedelta(days=0, seconds=8 * 60 * 60, microseconds=0) - self.time() ) return \"%s", "= models.CharField((\"answer\"), max_length=600, blank=False,) correct_answer = models.BooleanField( default=False, help_text=(\"the answer is correct?.\"), )", "class Exam(models.Model): user = models.ForeignKey(User, default=None, on_delete=models.CASCADE) content = models.OneToOneField( Content, blank=True, null=True,", "= answerForm.save(commit=False) answer_instance.exam = familyInstance[\"Exam\"] answer_instance.question = self answer_instance.save() def get_answers_instances(self): return Answer.objects.filter(question=self)", "to pass this exam. 
Max 5 digits: 3 for en integer part and", "= ( timezone.timedelta(days=0, seconds=8 * 60 * 60, microseconds=0) - self.time() ) return", "= ( float(question_instance.question_value) if question[\"validate\"] else float(0) ) questions[\"approved\"] = float(self.approved) return questions", "with this exam\" validation = {} for question_instance, question in zip(questions_instances, questions.values()): validation[\"Questionid\"]", "return reverse(\"exams:exam-take\", kwargs={'id':self.content.belongs_to_the_course().id, 'idModule':self.content.belongs_to_the_module().id, 'idContent':self.content.id, 'idExam':self.id}) return reverse(\"exams:exam-take\", kwargs={\"idExam\": self.id}) def get_detail_url(self): return", "= models.OneToOneField( Content, blank=True, null=True, default=None, on_delete=models.SET_DEFAULT ) name = models.CharField( (\"Exam name\"),", "= 0 for key, question in userQuestions.items(): if key != \"approved\": new_points +=", "for question_instance, question in zip(questions_instances, questions.values()): validation[\"Questionid\"] = question_instance.id answers_instances = question_instance.get_answers_instances() if", ">= 3: if self.time().days > 0 or self.time().seconds > (8 * 60 *", "can_take_exam(self): if self.number_of_try >= 3: if self.time().days > 0 or self.time().seconds > (8", "point value.\"), \"null\": (\"you must provied the point value.\"), }, ) def __str__(self):", "familyInstance = kwargs.pop(\"familyInstance\", None) super(Question, self).save(*args, **kwargs) if answerlistForms: for answerForm in answerlistForms:", "userQuestions new_points = 0 for key, question in userQuestions.items(): if key != \"approved\":", "return reverse(\"exams:exam-update\", kwargs={\"id\": self.id}) def get_delete_url(self): return reverse(\"exams:exam-delete\", kwargs={\"id\": self.id}) def get_evaluated_url(self): return", "default=0, validators=[MinValueValidator(0), MaxValueValidator(3)] ) last_try = models.DateTimeField(default=None, 
blank=True, null=True) def __str__(self): return self.exam.name", "new_points = 0 for key, question in userQuestions.items(): if key != \"approved\": new_points", "question_value = models.DecimalField( (\"points\"), default=None, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], help_text=('Max 5 digits: same", "with their answers to evaluate\" for question in questions.values(): question[\"Answers\"] = sorted(question[\"Answers\"], key=itemgetter(0))", "for answerForm in answerlistForms: answer_instance = answerForm.save(commit=False) answer_instance.exam = familyInstance[\"Exam\"] answer_instance.question = self", "(time.seconds // 60) % 60, ) def try_exam(self, userQuestions): self.number_of_try += 1 self.last_try", "( time.seconds // 3600, (time.seconds // 60) % 60, ) def try_exam(self, userQuestions):", "+ self.question def save(self, *args, **kwargs): answerlistForms = kwargs.pop(\"answerlist\", None) familyInstance = kwargs.pop(\"familyInstance\",", "models.ForeignKey(User, on_delete=models.CASCADE) exam = models.ForeignKey(Exam, on_delete=models.CASCADE) user_answer = models.TextField(default=None, blank=True, null=True) points =", "return True return False return True def get_user_answer(self): user_answer = ast.literal_eval(self.user_answer) del user_answer[\"approved\"]", "in questionlistForms: question_instance = questionForm.save(commit=False) question_instance.exam = self question_instance.save( answerlist=questionForm.answersForms(), familyInstance={\"Exam\": self}, )", "\"Put here the minimum points necessary to pass this exam. 
Max 5 digits:", "(\"you must provied the point value.\"), }, ) def __str__(self): return \"Question: \"", "answer_instance.save() def get_answers_instances(self): return Answer.objects.filter(question=self) def get_altAnswers_instances(self): return Answer.objects.filter(question=self).order_by(\"?\") class Answer(models.Model): exam =", "default=None, on_delete=models.CASCADE) content = models.OneToOneField( Content, blank=True, null=True, default=None, on_delete=models.SET_DEFAULT ) name =", "to approved\"'), # help_text=('point value of the question. Max 5 digits: 3 for", "provied the minimun points.\"), \"null\": (\"you must provied the minimun points.\"), }, )", "def get_delete_url(self): return reverse(\"exams:exam-delete\", kwargs={\"id\": self.id}) def get_evaluated_url(self): return reverse(\"exams:exam-evaluated\", kwargs={\"idExam\": self.id}) def", "integer part and 2 for decimal part'), blank=False, null=False, error_messages={ \"blank\": (\"you must", "class Answer(models.Model): exam = models.ForeignKey(Exam, default=None, on_delete=models.CASCADE) question = models.ForeignKey(Question, default=None, on_delete=models.CASCADE) answer", "models.DecimalField( (\"minimun points to approved\"), default=None, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], blank=False, null=False, help_text=(", "validators=[MinValueValidator(0), MaxValueValidator(100)], help_text=('Max 5 digits: same as \"minimun points to approved\"'), # help_text=('point", "answerlistForms: for answerForm in answerlistForms: answer_instance = answerForm.save(commit=False) answer_instance.exam = familyInstance[\"Exam\"] answer_instance.question =", "questions: raise \"they must provide the questions with their answers to evaluate\" for", "on_delete=models.CASCADE) answer = models.CharField((\"answer\"), max_length=600, blank=False,) correct_answer = models.BooleanField( default=False, help_text=(\"the answer is", "for decimal part'), blank=False, 
null=False, error_messages={ \"blank\": (\"you must provied the point value.\"),", "**kwargs): questionlistForms = kwargs.pop(\"questionlist\", None) if kwargs.pop(\"update\", None): Question.objects.filter(exam=self).delete() super(Exam, self).save(*args, **kwargs) if", "# return reverse(\"courses:course-home-week-content\", kwargs={'id':self.course.id, 'idModule':self.module.id, 'idContent':self.id}) # return reverse(\"exams:exam-take\", kwargs={'id':self.content.belongs_to_the_course().id, 'idModule':self.content.belongs_to_the_module().id, 'idContent':self.content.id, 'idExam':self.id})", "answers_instances = question_instance.get_answers_instances() if len(answers_instances) != len(question[\"Answers\"]): raise \"must provide all answers associated", "= int(question[\"id\"]) return user_answer def time(self): return timezone.now() - self.last_try def time_until_take(self): time", "\" + self.name def save(self, *args, **kwargs): questionlistForms = kwargs.pop(\"questionlist\", None) if kwargs.pop(\"update\",", "= models.ForeignKey(Exam, on_delete=models.CASCADE) user_answer = models.TextField(default=None, blank=True, null=True) points = models.DecimalField( default=0, max_digits=5,", "exam\" validation = {} for question_instance, question in zip(questions_instances, questions.values()): validation[\"Questionid\"] = question_instance.id", "return self.user.get_full_name() def get_questions_instances(self): questions = Question.objects.filter(exam=self) numbers_of_questions = [index + 1 for", "this exam\" validation = {} for question_instance, question in zip(questions_instances, questions.values()): validation[\"Questionid\"] =", "user = models.ForeignKey(User, on_delete=models.CASCADE) exam = models.ForeignKey(Exam, on_delete=models.CASCADE) user_answer = models.TextField(default=None, blank=True, null=True)", "question\"), default=0, ) question_value = models.DecimalField( (\"points\"), default=None, max_digits=5, decimal_places=2, 
validators=[MinValueValidator(0), MaxValueValidator(100)], help_text=('Max", "self.get_questions_instances() ] if len(questions_instances) != len(questions): raise \"must provide all questions associated with", "> (8 * 60 * 60): self.number_of_try = 0 self.save() return True return", "= models.BooleanField( default=False, help_text=(\"the answer is correct?.\"), ) def __str__(self): return \"answer for", "timezone.now() self.user_answer = userQuestions new_points = 0 for key, question in userQuestions.items(): if", "provied the minimun points.\"), }, ) create_date = models.DateTimeField(auto_now_add=True) def __str__(self): return \"Quiz:", ") questions[\"approved\"] = float(self.approved) return questions class Question(models.Model): exam = models.ForeignKey(Exam, default=None, on_delete=models.CASCADE)", "question = models.ForeignKey(Question, default=None, on_delete=models.CASCADE) answer = models.CharField((\"answer\"), max_length=600, blank=False,) correct_answer = models.BooleanField(", "if not questions: raise \"they must provide the questions with their answers to", "key, question in userQuestions.items(): if key != \"approved\": new_points += question[\"points\"] if new_points", "False continue del question[\"Answers\"] question[\"points\"] = ( float(question_instance.question_value) if question[\"validate\"] else float(0) )", "\"they must provide the questions with their answers to evaluate\" for question in", "def evaluate(self, questions): if not questions: raise \"they must provide the questions with", "the questions with their answers to evaluate\" for question in questions.values(): question[\"Answers\"] =", "len(answers_instances) != len(question[\"Answers\"]): raise \"must provide all answers associated with this exam\" question[\"validate\"]", "* 60 * 60): self.number_of_try = 0 self.save() return True return False return", "'idModule':self.module.id, 'idContent':self.id}) # return reverse(\"exams:exam-take\", 
kwargs={'id':self.content.belongs_to_the_course().id, 'idModule':self.content.belongs_to_the_module().id, 'idContent':self.content.id, 'idExam':self.id}) return reverse(\"exams:exam-take\", kwargs={\"idExam\": self.id})", "reverse(\"exams:exam-update\", kwargs={\"id\": self.id}) def get_delete_url(self): return reverse(\"exams:exam-delete\", kwargs={\"id\": self.id}) def get_evaluated_url(self): return reverse(\"exams:exam-evaluated\",", "question = models.CharField( (\"question\"), max_length=600, blank=False, unique=False, help_text=(\"Required. 600 characters or fewer.\"), error_messages={\"unique\":", "blank=False,) correct_answer = models.BooleanField( default=False, help_text=(\"the answer is correct?.\"), ) def __str__(self): return", "characters or fewer.\"), ) description = models.TextField() approved = models.DecimalField( (\"minimun points to", "self.number_of_try = 0 self.save() return True return False return True def get_user_answer(self): user_answer", "pass this exam. Max 5 digits: 3 for en integer part and 2", "get_altAnswers_instances(self): return Answer.objects.filter(question=self).order_by(\"?\") class Answer(models.Model): exam = models.ForeignKey(Exam, default=None, on_delete=models.CASCADE) question = models.ForeignKey(Question,", "digits: 3 for en integer part and 2 for decimal part'), blank=False, null=False,", "that name already exists.\"),}, ) correct_answers = models.IntegerField( (\"number of correct answers for", "from django.db import models import ast class Exam(models.Model): user = models.ForeignKey(User, default=None, on_delete=models.CASCADE)", "+= 1 self.last_try = timezone.now() self.user_answer = userQuestions new_points = 0 for key,", "kwargs={\"idExam\": self.id}) def evaluate(self, questions): if not questions: raise \"they must provide the", "points.\"), }, ) create_date = models.DateTimeField(auto_now_add=True) def __str__(self): return \"Quiz: \" + self.name", "(\"question\"), max_length=600, blank=False, unique=False, 
help_text=(\"Required. 600 characters or fewer.\"), error_messages={\"unique\": (\"A question with", "exam. Max 5 digits: 3 for en integer part and 2 for decimal", "= [ question for _, question in self.get_questions_instances() ] if len(questions_instances) != len(questions):", "default=None, on_delete=models.CASCADE) question = models.CharField( (\"question\"), max_length=600, blank=False, unique=False, help_text=(\"Required. 600 characters or", "default=None, on_delete=models.SET_DEFAULT ) name = models.CharField( (\"Exam name\"), max_length=150, unique=False, blank=False, help_text=(\"Required. 150", "(\"you must provied the minimun points.\"), }, ) create_date = models.DateTimeField(auto_now_add=True) def __str__(self):", "del question[\"Answers\"] question[\"points\"] = ( float(question_instance.question_value) if question[\"validate\"] else float(0) ) questions[\"approved\"] =", "models.TextField() approved = models.DecimalField( (\"minimun points to approved\"), default=None, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)],", "return \"%s hour(s) and %s minut(s)\" % ( time.seconds // 3600, (time.seconds //", "on_delete=models.CASCADE) question = models.ForeignKey(Question, default=None, on_delete=models.CASCADE) answer = models.CharField((\"answer\"), max_length=600, blank=False,) correct_answer =", "return reverse(\"exams:exam-take\", kwargs={\"idExam\": self.id}) def get_detail_url(self): return reverse(\"exams:exam-detail\", kwargs={\"id\": self.id}) def get_edit_url(self): return", "here the minimum points necessary to pass this exam. 
Max 5 digits: 3", "exam\" question[\"validate\"] = True for answer_instance, answer in zip(answers_instances, question[\"Answers\"]): if answer_instance.correct_answer !=", "'idModule':self.content.belongs_to_the_module().id, 'idContent':self.content.id, 'idExam':self.id}) return reverse(\"exams:exam-take\", kwargs={\"idExam\": self.id}) def get_detail_url(self): return reverse(\"exams:exam-detail\", kwargs={\"id\": self.id})", "must provied the point value.\"), \"null\": (\"you must provied the point value.\"), },", ") correct_answers = models.IntegerField( (\"number of correct answers for this question\"), help_text=(\"number of", "import timezone from django.urls import reverse from operator import itemgetter from users.models import", "self.id}) def get_delete_url(self): return reverse(\"exams:exam-delete\", kwargs={\"id\": self.id}) def get_evaluated_url(self): return reverse(\"exams:exam-evaluated\", kwargs={\"idExam\": self.id})", "their answers to evaluate\" for question in questions.values(): question[\"Answers\"] = sorted(question[\"Answers\"], key=itemgetter(0)) questions_instances", "if len(answers_instances) != len(question[\"Answers\"]): raise \"must provide all answers associated with this exam\"", "self.id}) def get_evaluated_url(self): return reverse(\"exams:exam-evaluated\", kwargs={\"idExam\": self.id}) def evaluate(self, questions): if not questions:", "answer in zip(answers_instances, question[\"Answers\"]): if answer_instance.correct_answer != answer[1]: question[\"validate\"] = False continue del", "MaxValueValidator(3)] ) last_try = models.DateTimeField(default=None, blank=True, null=True) def __str__(self): return self.exam.name + \":", "userQuestions.items(): if key != \"approved\": new_points += question[\"points\"] if new_points > self.points: self.points", "for answer_instance, answer in zip(answers_instances, question[\"Answers\"]): if answer_instance.correct_answer != answer[1]: question[\"validate\"] = False", "import User from 
django.db import models import ast class Exam(models.Model): user = models.ForeignKey(User,", "familyInstance[\"Exam\"] answer_instance.question = self answer_instance.save() def get_answers_instances(self): return Answer.objects.filter(question=self) def get_altAnswers_instances(self): return Answer.objects.filter(question=self).order_by(\"?\")", "reverse(\"exams:exam-take\", kwargs={'id':self.content.belongs_to_the_course().id, 'idModule':self.content.belongs_to_the_module().id, 'idContent':self.content.id, 'idExam':self.id}) return reverse(\"exams:exam-take\", kwargs={\"idExam\": self.id}) def get_detail_url(self): return reverse(\"exams:exam-detail\",", "def __str__(self): return \"answer for question, \" + str(self.question_id) class ExamUserRelations(models.Model): user =", "if question[\"validate\"] else float(0) ) questions[\"approved\"] = float(self.approved) return questions class Question(models.Model): exam", "**kwargs): answerlistForms = kwargs.pop(\"answerlist\", None) familyInstance = kwargs.pop(\"familyInstance\", None) super(Question, self).save(*args, **kwargs) if", "key=itemgetter(0)) questions_instances = [ question for _, question in self.get_questions_instances() ] if len(questions_instances)", "question in userQuestions.items(): if key != \"approved\": new_points += question[\"points\"] if new_points >", ") def __str__(self): return \"answer for question, \" + str(self.question_id) class ExamUserRelations(models.Model): user", "from django.utils.translation import gettext_lazy as _ from django.core.exceptions import ObjectDoesNotExist from courses.models import", "not questions: raise \"they must provide the questions with their answers to evaluate\"", "return reverse(\"exams:exam-evaluated\", kwargs={\"idExam\": self.id}) def evaluate(self, questions): if not questions: raise \"they must", "default=None, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], help_text=('Max 5 digits: same as \"minimun 
points to", "= models.ForeignKey(Exam, default=None, on_delete=models.CASCADE) question = models.ForeignKey(Question, default=None, on_delete=models.CASCADE) answer = models.CharField((\"answer\"), max_length=600,", "models.ForeignKey(Exam, on_delete=models.CASCADE) user_answer = models.TextField(default=None, blank=True, null=True) points = models.DecimalField( default=0, max_digits=5, decimal_places=2,", "\"null\": (\"you must provied the point value.\"), }, ) def __str__(self): return \"Question:", "= familyInstance[\"Exam\"] answer_instance.question = self answer_instance.save() def get_answers_instances(self): return Answer.objects.filter(question=self) def get_altAnswers_instances(self): return", "len(questions): raise \"must provide all questions associated with this exam\" validation = {}", "'idContent':self.id}) # return reverse(\"exams:exam-take\", kwargs={'id':self.content.belongs_to_the_course().id, 'idModule':self.content.belongs_to_the_module().id, 'idContent':self.content.id, 'idExam':self.id}) return reverse(\"exams:exam-take\", kwargs={\"idExam\": self.id}) def", "answerForm.save(commit=False) answer_instance.exam = familyInstance[\"Exam\"] answer_instance.question = self answer_instance.save() def get_answers_instances(self): return Answer.objects.filter(question=self) def", "must provide the questions with their answers to evaluate\" for question in questions.values():", "the question. 
Max 5 digits: 3 for en integer part and 2 for", "= models.DecimalField( (\"minimun points to approved\"), default=None, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], blank=False, null=False,", "self.user.get_full_name() def get_questions_instances(self): questions = Question.objects.filter(exam=self) numbers_of_questions = [index + 1 for index", "or self.time().seconds > (8 * 60 * 60): self.number_of_try = 0 self.save() return", "= ast.literal_eval(self.user_answer) del user_answer[\"approved\"] for question in user_answer.values(): question[\"id\"] = int(question[\"id\"]) return user_answer", "ast class Exam(models.Model): user = models.ForeignKey(User, default=None, on_delete=models.CASCADE) content = models.OneToOneField( Content, blank=True,", "zip(numbers_of_questions, questions) def get_take_url(self): # return reverse(\"courses:course-home-week-content\", kwargs={'id':self.course.id, 'idModule':self.module.id, 'idContent':self.id}) # return reverse(\"exams:exam-take\",", "return \"Quiz: \" + self.name def save(self, *args, **kwargs): questionlistForms = kwargs.pop(\"questionlist\", None)", "for this question\"), default=0, ) question_value = models.DecimalField( (\"points\"), default=None, max_digits=5, decimal_places=2, validators=[MinValueValidator(0),", "save(self, *args, **kwargs): answerlistForms = kwargs.pop(\"answerlist\", None) familyInstance = kwargs.pop(\"familyInstance\", None) super(Question, self).save(*args,", "timezone.now() - self.last_try def time_until_take(self): time = ( timezone.timedelta(days=0, seconds=8 * 60 *", "questions.values(): question[\"Answers\"] = sorted(question[\"Answers\"], key=itemgetter(0)) questions_instances = [ question for _, question in", "exam = models.ForeignKey(Exam, on_delete=models.CASCADE) user_answer = models.TextField(default=None, blank=True, null=True) points = models.DecimalField( default=0,", "Answer(models.Model): exam = models.ForeignKey(Exam, default=None, 
on_delete=models.CASCADE) question = models.ForeignKey(Question, default=None, on_delete=models.CASCADE) answer =", "reverse(\"exams:exam-evaluated\", kwargs={\"idExam\": self.id}) def evaluate(self, questions): if not questions: raise \"they must provide", "def get_owner(self): return self.user.get_full_name() def get_questions_instances(self): questions = Question.objects.filter(exam=self) numbers_of_questions = [index +", "return reverse(\"exams:exam-delete\", kwargs={\"id\": self.id}) def get_evaluated_url(self): return reverse(\"exams:exam-evaluated\", kwargs={\"idExam\": self.id}) def evaluate(self, questions):", "question_instance.get_answers_instances() if len(answers_instances) != len(question[\"Answers\"]): raise \"must provide all answers associated with this", "models.TextField(default=None, blank=True, null=True) points = models.DecimalField( default=0, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], ) number_of_try", "name = models.CharField( (\"Exam name\"), max_length=150, unique=False, blank=False, help_text=(\"Required. 
150 characters or fewer.\"),", "self.time() ) return \"%s hour(s) and %s minut(s)\" % ( time.seconds // 3600,", "len(question[\"Answers\"]): raise \"must provide all answers associated with this exam\" question[\"validate\"] = True", "if self.number_of_try >= 3: if self.time().days > 0 or self.time().seconds > (8 *", "(\"minimun points to approved\"), default=None, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], blank=False, null=False, help_text=( \"Put", "approved\"), default=None, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], blank=False, null=False, help_text=( \"Put here the minimum", "max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], ) number_of_try = models.IntegerField( default=0, validators=[MinValueValidator(0), MaxValueValidator(3)] ) last_try", "Max 5 digits: 3 for en integer part and 2 for decimal part'),", "%s minut(s)\" % ( time.seconds // 3600, (time.seconds // 60) % 60, )", "gettext_lazy as _ from django.core.exceptions import ObjectDoesNotExist from courses.models import Content from django.utils", "from users.models import User from django.db import models import ast class Exam(models.Model): user", "key != \"approved\": new_points += question[\"points\"] if new_points > self.points: self.points = new_points", "help_text=(\"Required. 600 characters or fewer.\"), error_messages={\"unique\": (\"A question with that name already exists.\"),},", "models.CharField( (\"question\"), max_length=600, blank=False, unique=False, help_text=(\"Required. 
600 characters or fewer.\"), error_messages={\"unique\": (\"A question", "Answer.objects.filter(question=self) def get_altAnswers_instances(self): return Answer.objects.filter(question=self).order_by(\"?\") class Answer(models.Model): exam = models.ForeignKey(Exam, default=None, on_delete=models.CASCADE) question", "= models.ForeignKey(User, default=None, on_delete=models.CASCADE) content = models.OneToOneField( Content, blank=True, null=True, default=None, on_delete=models.SET_DEFAULT )", "blank=True, null=True) points = models.DecimalField( default=0, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], ) number_of_try =", "in range(questions.count())] return zip(numbers_of_questions, questions) def get_take_url(self): # return reverse(\"courses:course-home-week-content\", kwargs={'id':self.course.id, 'idModule':self.module.id, 'idContent':self.id})", "timezone from django.urls import reverse from operator import itemgetter from users.models import User", "answer_instance.question = self answer_instance.save() def get_answers_instances(self): return Answer.objects.filter(question=self) def get_altAnswers_instances(self): return Answer.objects.filter(question=self).order_by(\"?\") class", "reverse(\"courses:course-home-week-content\", kwargs={'id':self.course.id, 'idModule':self.module.id, 'idContent':self.id}) # return reverse(\"exams:exam-take\", kwargs={'id':self.content.belongs_to_the_course().id, 'idModule':self.content.belongs_to_the_module().id, 'idContent':self.content.id, 'idExam':self.id}) return reverse(\"exams:exam-take\",", "null=True) points = models.DecimalField( default=0, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], ) number_of_try = models.IntegerField(", "models.DateTimeField(auto_now_add=True) def __str__(self): return \"Quiz: \" + self.name def save(self, *args, **kwargs): questionlistForms", "return user_answer def time(self): return timezone.now() - 
self.last_try def time_until_take(self): time = (", "in user_answer.values(): question[\"id\"] = int(question[\"id\"]) return user_answer def time(self): return timezone.now() - self.last_try", "}, ) create_date = models.DateTimeField(auto_now_add=True) def __str__(self): return \"Quiz: \" + self.name def", "provide all answers associated with this exam\" question[\"validate\"] = True for answer_instance, answer", "_ from django.core.exceptions import ObjectDoesNotExist from courses.models import Content from django.utils import timezone", "= sorted(question[\"Answers\"], key=itemgetter(0)) questions_instances = [ question for _, question in self.get_questions_instances() ]", "for _, question in self.get_questions_instances() ] if len(questions_instances) != len(questions): raise \"must provide", "), error_messages={ \"blank\": (\"you must provied the minimun points.\"), \"null\": (\"you must provied", "def get_edit_url(self): return reverse(\"exams:exam-update\", kwargs={\"id\": self.id}) def get_delete_url(self): return reverse(\"exams:exam-delete\", kwargs={\"id\": self.id}) def", "raise \"must provide all questions associated with this exam\" validation = {} for", "% ( time.seconds // 3600, (time.seconds // 60) % 60, ) def try_exam(self,", "<filename>src/exams/models.py from django.core.validators import MaxValueValidator, MinValueValidator from django.utils.translation import gettext_lazy as _ from", "return self.exam.name + \": \" + self.user.get_full_name() def can_take_exam(self): if self.number_of_try >= 3:", "question_instance.save( answerlist=questionForm.answersForms(), familyInstance={\"Exam\": self}, ) def get_owner(self): return self.user.get_full_name() def get_questions_instances(self): questions =", ") create_date = models.DateTimeField(auto_now_add=True) def __str__(self): return \"Quiz: \" + self.name def save(self,", "= 0 self.save() return True return False return True def get_user_answer(self): user_answer =", "question[\"id\"] = 
int(question[\"id\"]) return user_answer def time(self): return timezone.now() - self.last_try def time_until_take(self):", "kwargs={\"id\": self.id}) def get_edit_url(self): return reverse(\"exams:exam-update\", kwargs={\"id\": self.id}) def get_delete_url(self): return reverse(\"exams:exam-delete\", kwargs={\"id\":", "for this question\"), help_text=(\"number of correct answers for this question\"), default=0, ) question_value", "MaxValueValidator(100)], help_text=('Max 5 digits: same as \"minimun points to approved\"'), # help_text=('point value", "error_messages={ \"blank\": (\"you must provied the point value.\"), \"null\": (\"you must provied the", "null=True, default=None, on_delete=models.SET_DEFAULT ) name = models.CharField( (\"Exam name\"), max_length=150, unique=False, blank=False, help_text=(\"Required.", "self).save(*args, **kwargs) if answerlistForms: for answerForm in answerlistForms: answer_instance = answerForm.save(commit=False) answer_instance.exam =", "questionForm in questionlistForms: question_instance = questionForm.save(commit=False) question_instance.exam = self question_instance.save( answerlist=questionForm.answersForms(), familyInstance={\"Exam\": self},", "points = models.DecimalField( default=0, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], ) number_of_try = models.IntegerField( default=0,", "questions) def get_take_url(self): # return reverse(\"courses:course-home-week-content\", kwargs={'id':self.course.id, 'idModule':self.module.id, 'idContent':self.id}) # return reverse(\"exams:exam-take\", kwargs={'id':self.content.belongs_to_the_course().id,", "None): Question.objects.filter(exam=self).delete() super(Exam, self).save(*args, **kwargs) if questionlistForms: for questionForm in questionlistForms: question_instance =", "kwargs.pop(\"familyInstance\", None) super(Question, self).save(*args, **kwargs) if answerlistForms: for answerForm in answerlistForms: answer_instance =", 
"get_detail_url(self): return reverse(\"exams:exam-detail\", kwargs={\"id\": self.id}) def get_edit_url(self): return reverse(\"exams:exam-update\", kwargs={\"id\": self.id}) def get_delete_url(self):", "zip(answers_instances, question[\"Answers\"]): if answer_instance.correct_answer != answer[1]: question[\"validate\"] = False continue del question[\"Answers\"] question[\"points\"]", "from courses.models import Content from django.utils import timezone from django.urls import reverse from", "self.number_of_try += 1 self.last_try = timezone.now() self.user_answer = userQuestions new_points = 0 for", "question[\"Answers\"] = sorted(question[\"Answers\"], key=itemgetter(0)) questions_instances = [ question for _, question in self.get_questions_instances()", "question, \" + str(self.question_id) class ExamUserRelations(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE) exam = models.ForeignKey(Exam,", "exists.\"),}, ) correct_answers = models.IntegerField( (\"number of correct answers for this question\"), help_text=(\"number", "default=None, on_delete=models.CASCADE) question = models.ForeignKey(Question, default=None, on_delete=models.CASCADE) answer = models.CharField((\"answer\"), max_length=600, blank=False,) correct_answer", "the minimun points.\"), \"null\": (\"you must provied the minimun points.\"), }, ) create_date", "decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], blank=False, null=False, help_text=( \"Put here the minimum points necessary to", "exam = models.ForeignKey(Exam, default=None, on_delete=models.CASCADE) question = models.ForeignKey(Question, default=None, on_delete=models.CASCADE) answer = models.CharField((\"answer\"),", "seconds=8 * 60 * 60, microseconds=0) - self.time() ) return \"%s hour(s) and", "self.id}) def evaluate(self, questions): if not questions: raise \"they must provide the questions", "blank=False, unique=False, help_text=(\"Required. 
600 characters or fewer.\"), error_messages={\"unique\": (\"A question with that name", "models.DecimalField( (\"points\"), default=None, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], help_text=('Max 5 digits: same as \"minimun", "help_text=(\"the answer is correct?.\"), ) def __str__(self): return \"answer for question, \" +", "question_instance, question in zip(questions_instances, questions.values()): validation[\"Questionid\"] = question_instance.id answers_instances = question_instance.get_answers_instances() if len(answers_instances)", "get_user_answer(self): user_answer = ast.literal_eval(self.user_answer) del user_answer[\"approved\"] for question in user_answer.values(): question[\"id\"] = int(question[\"id\"])", "len(questions_instances) != len(questions): raise \"must provide all questions associated with this exam\" validation", ") return \"%s hour(s) and %s minut(s)\" % ( time.seconds // 3600, (time.seconds", "= question_instance.id answers_instances = question_instance.get_answers_instances() if len(answers_instances) != len(question[\"Answers\"]): raise \"must provide all", "this question\"), help_text=(\"number of correct answers for this question\"), default=0, ) question_value =", "question_instance.id answers_instances = question_instance.get_answers_instances() if len(answers_instances) != len(question[\"Answers\"]): raise \"must provide all answers", "- self.time() ) return \"%s hour(s) and %s minut(s)\" % ( time.seconds //", "index in range(questions.count())] return zip(numbers_of_questions, questions) def get_take_url(self): # return reverse(\"courses:course-home-week-content\", kwargs={'id':self.course.id, 'idModule':self.module.id,", "self answer_instance.save() def get_answers_instances(self): return Answer.objects.filter(question=self) def get_altAnswers_instances(self): return Answer.objects.filter(question=self).order_by(\"?\") class Answer(models.Model): exam", "return 
zip(numbers_of_questions, questions) def get_take_url(self): # return reverse(\"courses:course-home-week-content\", kwargs={'id':self.course.id, 'idModule':self.module.id, 'idContent':self.id}) # return", "models.ForeignKey(Exam, default=None, on_delete=models.CASCADE) question = models.ForeignKey(Question, default=None, on_delete=models.CASCADE) answer = models.CharField((\"answer\"), max_length=600, blank=False,)", "validators=[MinValueValidator(0), MaxValueValidator(3)] ) last_try = models.DateTimeField(default=None, blank=True, null=True) def __str__(self): return self.exam.name +", "kwargs={'id':self.content.belongs_to_the_course().id, 'idModule':self.content.belongs_to_the_module().id, 'idContent':self.content.id, 'idExam':self.id}) return reverse(\"exams:exam-take\", kwargs={\"idExam\": self.id}) def get_detail_url(self): return reverse(\"exams:exam-detail\", kwargs={\"id\":", "str(self.question_id) class ExamUserRelations(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE) exam = models.ForeignKey(Exam, on_delete=models.CASCADE) user_answer =", "blank=True, null=True) def __str__(self): return self.exam.name + \": \" + self.user.get_full_name() def can_take_exam(self):", "reverse(\"exams:exam-detail\", kwargs={\"id\": self.id}) def get_edit_url(self): return reverse(\"exams:exam-update\", kwargs={\"id\": self.id}) def get_delete_url(self): return reverse(\"exams:exam-delete\",", "answer_instance = answerForm.save(commit=False) answer_instance.exam = familyInstance[\"Exam\"] answer_instance.question = self answer_instance.save() def get_answers_instances(self): return", "create_date = models.DateTimeField(auto_now_add=True) def __str__(self): return \"Quiz: \" + self.name def save(self, *args,", "description = models.TextField() approved = models.DecimalField( (\"minimun points to approved\"), default=None, max_digits=5, decimal_places=2,", "user_answer.values(): question[\"id\"] = int(question[\"id\"]) return user_answer def time(self): 
return timezone.now() - self.last_try def", "!= \"approved\": new_points += question[\"points\"] if new_points > self.points: self.points = new_points self.save()", "def time(self): return timezone.now() - self.last_try def time_until_take(self): time = ( timezone.timedelta(days=0, seconds=8", "if key != \"approved\": new_points += question[\"points\"] if new_points > self.points: self.points =", "decimal part\" ), error_messages={ \"blank\": (\"you must provied the minimun points.\"), \"null\": (\"you", "\"%s hour(s) and %s minut(s)\" % ( time.seconds // 3600, (time.seconds // 60)", "self.id}) def get_edit_url(self): return reverse(\"exams:exam-update\", kwargs={\"id\": self.id}) def get_delete_url(self): return reverse(\"exams:exam-delete\", kwargs={\"id\": self.id})", "raise \"they must provide the questions with their answers to evaluate\" for question", "\"minimun points to approved\"'), # help_text=('point value of the question. Max 5 digits:", "range(questions.count())] return zip(numbers_of_questions, questions) def get_take_url(self): # return reverse(\"courses:course-home-week-content\", kwargs={'id':self.course.id, 'idModule':self.module.id, 'idContent':self.id}) #", "self.question def save(self, *args, **kwargs): answerlistForms = kwargs.pop(\"answerlist\", None) familyInstance = kwargs.pop(\"familyInstance\", None)", "in userQuestions.items(): if key != \"approved\": new_points += question[\"points\"] if new_points > self.points:", "evaluate(self, questions): if not questions: raise \"they must provide the questions with their", "validators=[MinValueValidator(0), MaxValueValidator(100)], blank=False, null=False, help_text=( \"Put here the minimum points necessary to pass", "fewer.\"), ) description = models.TextField() approved = models.DecimalField( (\"minimun points to approved\"), default=None,", "users.models import User from django.db import models import ast class Exam(models.Model): user =", "= [index + 1 for index in range(questions.count())] 
return zip(numbers_of_questions, questions) def get_take_url(self):", "3600, (time.seconds // 60) % 60, ) def try_exam(self, userQuestions): self.number_of_try += 1", "self.time().days > 0 or self.time().seconds > (8 * 60 * 60): self.number_of_try =", "itemgetter from users.models import User from django.db import models import ast class Exam(models.Model):", "5 digits: 3 for en integer part and 2 for decimal part'), blank=False,", ") question_value = models.DecimalField( (\"points\"), default=None, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], help_text=('Max 5 digits:", "] if len(questions_instances) != len(questions): raise \"must provide all questions associated with this", "in self.get_questions_instances() ] if len(questions_instances) != len(questions): raise \"must provide all questions associated", "associated with this exam\" validation = {} for question_instance, question in zip(questions_instances, questions.values()):", "def save(self, *args, **kwargs): answerlistForms = kwargs.pop(\"answerlist\", None) familyInstance = kwargs.pop(\"familyInstance\", None) super(Question,", "self.number_of_try >= 3: if self.time().days > 0 or self.time().seconds > (8 * 60", "self.time().seconds > (8 * 60 * 60): self.number_of_try = 0 self.save() return True", "def get_detail_url(self): return reverse(\"exams:exam-detail\", kwargs={\"id\": self.id}) def get_edit_url(self): return reverse(\"exams:exam-update\", kwargs={\"id\": self.id}) def", "numbers_of_questions = [index + 1 for index in range(questions.count())] return zip(numbers_of_questions, questions) def", "def get_evaluated_url(self): return reverse(\"exams:exam-evaluated\", kwargs={\"idExam\": self.id}) def evaluate(self, questions): if not questions: raise", "the minimun points.\"), }, ) create_date = models.DateTimeField(auto_now_add=True) def __str__(self): return \"Quiz: \"", "value of the question. 
Max 5 digits: 3 for en integer part and", "operator import itemgetter from users.models import User from django.db import models import ast", "answer = models.CharField((\"answer\"), max_length=600, blank=False,) correct_answer = models.BooleanField( default=False, help_text=(\"the answer is correct?.\"),", "def get_user_answer(self): user_answer = ast.literal_eval(self.user_answer) del user_answer[\"approved\"] for question in user_answer.values(): question[\"id\"] =", "validators=[MinValueValidator(0), MaxValueValidator(100)], ) number_of_try = models.IntegerField( default=0, validators=[MinValueValidator(0), MaxValueValidator(3)] ) last_try = models.DateTimeField(default=None,", "import MaxValueValidator, MinValueValidator from django.utils.translation import gettext_lazy as _ from django.core.exceptions import ObjectDoesNotExist", "try_exam(self, userQuestions): self.number_of_try += 1 self.last_try = timezone.now() self.user_answer = userQuestions new_points =", "if kwargs.pop(\"update\", None): Question.objects.filter(exam=self).delete() super(Exam, self).save(*args, **kwargs) if questionlistForms: for questionForm in questionlistForms:", "super(Exam, self).save(*args, **kwargs) if questionlistForms: for questionForm in questionlistForms: question_instance = questionForm.save(commit=False) question_instance.exam", "as \"minimun points to approved\"'), # help_text=('point value of the question. 
Max 5", "answerlistForms: answer_instance = answerForm.save(commit=False) answer_instance.exam = familyInstance[\"Exam\"] answer_instance.question = self answer_instance.save() def get_answers_instances(self):", "kwargs.pop(\"answerlist\", None) familyInstance = kwargs.pop(\"familyInstance\", None) super(Question, self).save(*args, **kwargs) if answerlistForms: for answerForm", "decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], help_text=('Max 5 digits: same as \"minimun points to approved\"'), #", "def get_take_url(self): # return reverse(\"courses:course-home-week-content\", kwargs={'id':self.course.id, 'idModule':self.module.id, 'idContent':self.id}) # return reverse(\"exams:exam-take\", kwargs={'id':self.content.belongs_to_the_course().id, 'idModule':self.content.belongs_to_the_module().id,", "question[\"points\"] = ( float(question_instance.question_value) if question[\"validate\"] else float(0) ) questions[\"approved\"] = float(self.approved) return", "digits: same as \"minimun points to approved\"'), # help_text=('point value of the question.", "user = models.ForeignKey(User, default=None, on_delete=models.CASCADE) content = models.OneToOneField( Content, blank=True, null=True, default=None, on_delete=models.SET_DEFAULT", "points to approved\"'), # help_text=('point value of the question. Max 5 digits: 3", "unique=False, help_text=(\"Required. 
600 characters or fewer.\"), error_messages={\"unique\": (\"A question with that name already", "super(Question, self).save(*args, **kwargs) if answerlistForms: for answerForm in answerlistForms: answer_instance = answerForm.save(commit=False) answer_instance.exam", "> 0 or self.time().seconds > (8 * 60 * 60): self.number_of_try = 0", "(\"points\"), default=None, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], help_text=('Max 5 digits: same as \"minimun points", "\"must provide all questions associated with this exam\" validation = {} for question_instance,", "self).save(*args, **kwargs) if questionlistForms: for questionForm in questionlistForms: question_instance = questionForm.save(commit=False) question_instance.exam =", "60 * 60): self.number_of_try = 0 self.save() return True return False return True", "60 * 60, microseconds=0) - self.time() ) return \"%s hour(s) and %s minut(s)\"", "= models.TextField(default=None, blank=True, null=True) points = models.DecimalField( default=0, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], )", "self.last_try = timezone.now() self.user_answer = userQuestions new_points = 0 for key, question in", "to approved\"), default=None, max_digits=5, decimal_places=2, validators=[MinValueValidator(0), MaxValueValidator(100)], blank=False, null=False, help_text=( \"Put here the", "*args, **kwargs): questionlistForms = kwargs.pop(\"questionlist\", None) if kwargs.pop(\"update\", None): Question.objects.filter(exam=self).delete() super(Exam, self).save(*args, **kwargs)", "userQuestions): self.number_of_try += 1 self.last_try = timezone.now() self.user_answer = userQuestions new_points = 0", "unique=False, blank=False, help_text=(\"Required. 150 characters or fewer.\"), ) description = models.TextField() approved =", "models.CharField( (\"Exam name\"), max_length=150, unique=False, blank=False, help_text=(\"Required. 
150 characters or fewer.\"), ) description", "import ObjectDoesNotExist from courses.models import Content from django.utils import timezone from django.urls import", "raise \"must provide all answers associated with this exam\" question[\"validate\"] = True for", "*args, **kwargs): answerlistForms = kwargs.pop(\"answerlist\", None) familyInstance = kwargs.pop(\"familyInstance\", None) super(Question, self).save(*args, **kwargs)", "for en integer part and 2 for decimal part'), blank=False, null=False, error_messages={ \"blank\":", "def __str__(self): return \"Quiz: \" + self.name def save(self, *args, **kwargs): questionlistForms =", "[index + 1 for index in range(questions.count())] return zip(numbers_of_questions, questions) def get_take_url(self): #", "Question(models.Model): exam = models.ForeignKey(Exam, default=None, on_delete=models.CASCADE) question = models.CharField( (\"question\"), max_length=600, blank=False, unique=False,", "questions.values()): validation[\"Questionid\"] = question_instance.id answers_instances = question_instance.get_answers_instances() if len(answers_instances) != len(question[\"Answers\"]): raise \"must", "minimun points.\"), \"null\": (\"you must provied the minimun points.\"), }, ) create_date =", ") def try_exam(self, userQuestions): self.number_of_try += 1 self.last_try = timezone.now() self.user_answer = userQuestions", "def get_answers_instances(self): return Answer.objects.filter(question=self) def get_altAnswers_instances(self): return Answer.objects.filter(question=self).order_by(\"?\") class Answer(models.Model): exam = models.ForeignKey(Exam,", "int(question[\"id\"]) return user_answer def time(self): return timezone.now() - self.last_try def time_until_take(self): time =", ") description = models.TextField() approved = models.DecimalField( (\"minimun points to approved\"), default=None, max_digits=5,", "get_edit_url(self): return reverse(\"exams:exam-update\", kwargs={\"id\": self.id}) def get_delete_url(self): return 
reverse(\"exams:exam-delete\", kwargs={\"id\": self.id}) def get_evaluated_url(self):", "* 60, microseconds=0) - self.time() ) return \"%s hour(s) and %s minut(s)\" %", "\"must provide all answers associated with this exam\" question[\"validate\"] = True for answer_instance,", "= kwargs.pop(\"familyInstance\", None) super(Question, self).save(*args, **kwargs) if answerlistForms: for answerForm in answerlistForms: answer_instance", "in questions.values(): question[\"Answers\"] = sorted(question[\"Answers\"], key=itemgetter(0)) questions_instances = [ question for _, question", ") number_of_try = models.IntegerField( default=0, validators=[MinValueValidator(0), MaxValueValidator(3)] ) last_try = models.DateTimeField(default=None, blank=True, null=True)", "= models.CharField( (\"Exam name\"), max_length=150, unique=False, blank=False, help_text=(\"Required. 150 characters or fewer.\"), )", "= models.DateTimeField(auto_now_add=True) def __str__(self): return \"Quiz: \" + self.name def save(self, *args, **kwargs):", "**kwargs) if questionlistForms: for questionForm in questionlistForms: question_instance = questionForm.save(commit=False) question_instance.exam = self", "django.utils import timezone from django.urls import reverse from operator import itemgetter from users.models", "answers associated with this exam\" question[\"validate\"] = True for answer_instance, answer in zip(answers_instances,", "questionForm.save(commit=False) question_instance.exam = self question_instance.save( answerlist=questionForm.answersForms(), familyInstance={\"Exam\": self}, ) def get_owner(self): return self.user.get_full_name()", "value.\"), }, ) def __str__(self): return \"Question: \" + self.question def save(self, *args," ]
[ "few of the primary parameters are actually used in the circuit. When the", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "1 n_vals = range(6, 13, 1) def __init__(self, device=None, verbose=False): super().__init__(device, verbose) self.qnode", "not, the VariableRefs handle it) for _ in range(1, 10): # If we", "permissions and # limitations under the License. \"\"\" Mutable QNode, complicated primary parameters", "the result assert np.allclose(res, np.cos(p[aux][2])) # first evaluation and construction evaluate(0) # evaluate", "them are always zero. \"\"\" name = \"mutable qnode, complicated primary params\" min_wires", "the License. \"\"\" Mutable QNode, complicated primary parameters benchmark. \"\"\" # pylint: disable=invalid-name", "a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable", "License. \"\"\" Mutable QNode, complicated primary parameters benchmark. \"\"\" # pylint: disable=invalid-name import", "not matter if p changes or not, the VariableRefs handle it) for _", "QNode has lots of primary parameters with a complicated nested structure, but relatively", "reconstruct the QNode if the auxiliary params have changed. * Most of the", "but relatively few auxiliary parameters, and only a few of the primary parameters", "def circuit(p, *, aux=0): \"\"\"A very simple, lightweight mutable quantum circuit.\"\"\" qml.RX(p[aux][2], wires=[0])", "this file except in compliance with the License. # You may obtain a", "parameters change. The main reasons why there are significant differences in the execution", "each primary parameter, and the qfunc re-evaluated. In this test this is meant", "re-evaluated. 
In this test this is meant to be time-consuming, but it is", "to be time-consuming, but it is only strictly necessary if the auxiliary parameters", "nodes have two branches and a scalar return [create_params(n - 1), create_params(n -", "reasons why there are significant differences in the execution speed of this test", "used in the circuit. When the QNode is constructed, a VariableRef is built", "# http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing,", "ANY KIND, either express or implied. # See the License for the specific", "argument # (it does not matter if p changes or not, the VariableRefs", "# (it does not matter if p changes or not, the VariableRefs handle", "main reasons why there are significant differences in the execution speed of this", "\"\"\" This benchmark attempts to measure the efficiency of :meth:`JacobianQNode._construct` for mutable QNodes,", "are arrays return np.random.randn(2) # the other nodes have two branches and a", "\"\"\"A very simple, lightweight mutable quantum circuit.\"\"\" qml.RX(p[aux][2], wires=[0]) return qml.expval(qml.PauliZ(0)) class Benchmark(bu.BaseBenchmark):", "we had evaluate(i % 2) here instead the auxiliary arguments would change #", "qml import benchmark_utils as bu def circuit(p, *, aux=0): \"\"\"A very simple, lightweight", "the circuit. When the QNode is constructed, a VariableRef is built for each", "the qfunc re-evaluated. In this test this is meant to be time-consuming, but", "device=None, verbose=False): super().__init__(device, verbose) self.qnode = None def setup(self): self.qnode = bu.create_qnode(circuit, self.device,", "this is meant to be time-consuming, but it is only strictly necessary if", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "should efficiently figure out that partial derivatives wrt. them are always zero. 
\"\"\"", "test between different PL commits: * :meth:`BaseQNode._construct` should only reconstruct the QNode if", "self.qnode = None def setup(self): self.qnode = bu.create_qnode(circuit, self.device, mutable=True, interface=None) def benchmark(self,", "Technologies Inc. # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "verbose) self.qnode = None def setup(self): self.qnode = bu.create_qnode(circuit, self.device, mutable=True, interface=None) def", "derivatives wrt. them are always zero. \"\"\" name = \"mutable qnode, complicated primary", "When the QNode is constructed, a VariableRef is built for each primary parameter,", "range(6, 13, 1) def __init__(self, device=None, verbose=False): super().__init__(device, verbose) self.qnode = None def", "VariableRefs handle it) for _ in range(1, 10): # If we had evaluate(i", "OF ANY KIND, either express or implied. # See the License for the", "execution speed of this test between different PL commits: * :meth:`BaseQNode._construct` should only", "only strictly necessary if the auxiliary parameters change. The main reasons why there", "params\" min_wires = 1 n_vals = range(6, 13, 1) def __init__(self, device=None, verbose=False):", "primary params are not used in the circuit, hence :meth:`JacobianQNode._construct` should efficiently figure", "first evaluation and construction evaluate(0) # evaluate the node several times more with", "qfunc re-evaluated. In this test this is meant to be time-consuming, but it", "structure with n levels.\"\"\" if n <= 0: # the leaves are arrays", "# evaluate the node several times more with a different auxiliary argument #", "Most of the primary params are not used in the circuit, hence :meth:`JacobianQNode._construct`", "under the License. \"\"\" Mutable QNode, complicated primary parameters benchmark. 
\"\"\" # pylint:", "mutable quantum circuit.\"\"\" qml.RX(p[aux][2], wires=[0]) return qml.expval(qml.PauliZ(0)) class Benchmark(bu.BaseBenchmark): \"\"\" This benchmark attempts", "aux=0): \"\"\"A very simple, lightweight mutable quantum circuit.\"\"\" qml.RX(p[aux][2], wires=[0]) return qml.expval(qml.PauliZ(0)) class", "scalar return [create_params(n - 1), create_params(n - 1), np.random.randn()] p = create_params(n) def", "in the execution speed of this test between different PL commits: * :meth:`BaseQNode._construct`", "complicated primary params\" min_wires = 1 n_vals = range(6, 13, 1) def __init__(self,", "assert np.allclose(res, np.cos(p[aux][2])) # first evaluation and construction evaluate(0) # evaluate the node", "other nodes have two branches and a scalar return [create_params(n - 1), create_params(n", "PL commits: * :meth:`BaseQNode._construct` should only reconstruct the QNode if the auxiliary params", "depends exponentially on n. def create_params(n): \"\"\"Recursively builds a tree structure with n", "specific language governing permissions and # limitations under the License. \"\"\" Mutable QNode,", "n is the number of levels in the primary parameter tree. # Hence", "attempts to measure the efficiency of :meth:`JacobianQNode._construct` for mutable QNodes, using an extreme", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "0: # the leaves are arrays return np.random.randn(2) # the other nodes have", "auxiliary params have changed. * Most of the primary params are not used", "between different PL commits: * :meth:`BaseQNode._construct` should only reconstruct the QNode if the", "# first evaluation and construction evaluate(0) # evaluate the node several times more", "primary parameters benchmark. 
\"\"\" # pylint: disable=invalid-name import numpy as np import pennylane", "as bu def circuit(p, *, aux=0): \"\"\"A very simple, lightweight mutable quantum circuit.\"\"\"", "disable=invalid-name import numpy as np import pennylane as qml import benchmark_utils as bu", "as np import pennylane as qml import benchmark_utils as bu def circuit(p, *,", "the auxiliary params have changed. * Most of the primary params are not", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "of this test between different PL commits: * :meth:`BaseQNode._construct` should only reconstruct the", "the circuit, hence :meth:`JacobianQNode._construct` should efficiently figure out that partial derivatives wrt. them", "circuit.\"\"\" qml.RX(p[aux][2], wires=[0]) return qml.expval(qml.PauliZ(0)) class Benchmark(bu.BaseBenchmark): \"\"\" This benchmark attempts to measure", "actually used in the circuit. When the QNode is constructed, a VariableRef is", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "would change # every time, which would negate most possible speedups. evaluate(1) return", "a different auxiliary argument # (it does not matter if p changes or", "required by applicable law or agreed to in writing, software # distributed under", "setup(self): self.qnode = bu.create_qnode(circuit, self.device, mutable=True, interface=None) def benchmark(self, n=8): # n is", "applicable law or agreed to in writing, software # distributed under the License", "arrays return np.random.randn(2) # the other nodes have two branches and a scalar", "name = \"mutable qnode, complicated primary params\" min_wires = 1 n_vals = range(6,", "only a few of the primary parameters are actually used in the circuit.", "or agreed to in writing, software # distributed under the License is distributed", "constructed, a VariableRef is built for each primary parameter, and the qfunc re-evaluated.", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "two branches and a scalar return [create_params(n - 1), create_params(n - 1), np.random.randn()]", "= None def setup(self): self.qnode = bu.create_qnode(circuit, self.device, mutable=True, interface=None) def benchmark(self, n=8):", "circuit, hence :meth:`JacobianQNode._construct` should efficiently figure out that partial derivatives wrt. them are", "tree. # Hence the number of primary parameters depends exponentially on n. def", "hence :meth:`JacobianQNode._construct` should efficiently figure out that partial derivatives wrt. them are always", "__init__(self, device=None, verbose=False): super().__init__(device, verbose) self.qnode = None def setup(self): self.qnode = bu.create_qnode(circuit,", "an extreme case where the QNode has lots of primary parameters with a", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "writing, software # distributed under the License is distributed on an \"AS IS\"", "= bu.create_qnode(circuit, self.device, mutable=True, interface=None) def benchmark(self, n=8): # n is the number", "of levels in the primary parameter tree. # Hence the number of primary", "circuit. When the QNode is constructed, a VariableRef is built for each primary", "and the qfunc re-evaluated. In this test this is meant to be time-consuming,", "be time-consuming, but it is only strictly necessary if the auxiliary parameters change.", "range(1, 10): # If we had evaluate(i % 2) here instead the auxiliary", "# check the result assert np.allclose(res, np.cos(p[aux][2])) # first evaluation and construction evaluate(0)", "but it is only strictly necessary if the auxiliary parameters change. The main", "necessary if the auxiliary parameters change. The main reasons why there are significant", "exponentially on n. def create_params(n): \"\"\"Recursively builds a tree structure with n levels.\"\"\"", "in the circuit. When the QNode is constructed, a VariableRef is built for", "compliance with the License. 
# You may obtain a copy of the License", "is constructed, a VariableRef is built for each primary parameter, and the qfunc", "construction evaluate(0) # evaluate the node several times more with a different auxiliary", "# the leaves are arrays return np.random.randn(2) # the other nodes have two", "# n is the number of levels in the primary parameter tree. #", "with n levels.\"\"\" if n <= 0: # the leaves are arrays return", "res = self.qnode(p, aux=aux) # check the result assert np.allclose(res, np.cos(p[aux][2])) # first", "the auxiliary parameters change. The main reasons why there are significant differences in", "is the number of levels in the primary parameter tree. # Hence the", "the qnode using the given auxiliary params.\"\"\" res = self.qnode(p, aux=aux) # check", "have changed. * Most of the primary params are not used in the", "def create_params(n): \"\"\"Recursively builds a tree structure with n levels.\"\"\" if n <=", "# pylint: disable=invalid-name import numpy as np import pennylane as qml import benchmark_utils", "levels in the primary parameter tree. # Hence the number of primary parameters", "different PL commits: * :meth:`BaseQNode._construct` should only reconstruct the QNode if the auxiliary", "auxiliary parameters, and only a few of the primary parameters are actually used", "parameters with a complicated nested structure, but relatively few auxiliary parameters, and only", "the number of levels in the primary parameter tree. # Hence the number", "\"\"\"Evaluates the qnode using the given auxiliary params.\"\"\" res = self.qnode(p, aux=aux) #", "not use this file except in compliance with the License. 
# You may", "this test between different PL commits: * :meth:`BaseQNode._construct` should only reconstruct the QNode", "not used in the circuit, hence :meth:`JacobianQNode._construct` should efficiently figure out that partial", "in the circuit, hence :meth:`JacobianQNode._construct` should efficiently figure out that partial derivatives wrt.", "builds a tree structure with n levels.\"\"\" if n <= 0: # the", "of primary parameters with a complicated nested structure, but relatively few auxiliary parameters,", "language governing permissions and # limitations under the License. \"\"\" Mutable QNode, complicated", "a complicated nested structure, but relatively few auxiliary parameters, and only a few", "License, Version 2.0 (the \"License\"); # you may not use this file except", "min_wires = 1 n_vals = range(6, 13, 1) def __init__(self, device=None, verbose=False): super().__init__(device,", "2018-2020 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0", "is built for each primary parameter, and the qfunc re-evaluated. In this test", "params have changed. * Most of the primary params are not used in", "return qml.expval(qml.PauliZ(0)) class Benchmark(bu.BaseBenchmark): \"\"\" This benchmark attempts to measure the efficiency of", "<= 0: # the leaves are arrays return np.random.randn(2) # the other nodes", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "commits: * :meth:`BaseQNode._construct` should only reconstruct the QNode if the auxiliary params have", "figure out that partial derivatives wrt. them are always zero. 
\"\"\" name =", "had evaluate(i % 2) here instead the auxiliary arguments would change # every", "copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law", "# you may not use this file except in compliance with the License.", "\"mutable qnode, complicated primary params\" min_wires = 1 n_vals = range(6, 13, 1)", "governing permissions and # limitations under the License. \"\"\" Mutable QNode, complicated primary", "agreed to in writing, software # distributed under the License is distributed on", "np import pennylane as qml import benchmark_utils as bu def circuit(p, *, aux=0):", "Copyright 2018-2020 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version", "1), np.random.randn()] p = create_params(n) def evaluate(aux): \"\"\"Evaluates the qnode using the given", "(the \"License\"); # you may not use this file except in compliance with", "significant differences in the execution speed of this test between different PL commits:", "class Benchmark(bu.BaseBenchmark): \"\"\" This benchmark attempts to measure the efficiency of :meth:`JacobianQNode._construct` for", "in the primary parameter tree. # Hence the number of primary parameters depends", "changed. * Most of the primary params are not used in the circuit,", "matter if p changes or not, the VariableRefs handle it) for _ in", "13, 1) def __init__(self, device=None, verbose=False): super().__init__(device, verbose) self.qnode = None def setup(self):", "nested structure, but relatively few auxiliary parameters, and only a few of the", "case where the QNode has lots of primary parameters with a complicated nested", "# Unless required by applicable law or agreed to in writing, software #", "auxiliary parameters change. 
The main reasons why there are significant differences in the", "by applicable law or agreed to in writing, software # distributed under the", "primary params\" min_wires = 1 n_vals = range(6, 13, 1) def __init__(self, device=None,", "out that partial derivatives wrt. them are always zero. \"\"\" name = \"mutable", "using an extreme case where the QNode has lots of primary parameters with", "always zero. \"\"\" name = \"mutable qnode, complicated primary params\" min_wires = 1", "primary parameters with a complicated nested structure, but relatively few auxiliary parameters, and", "benchmark. \"\"\" # pylint: disable=invalid-name import numpy as np import pennylane as qml", "simple, lightweight mutable quantum circuit.\"\"\" qml.RX(p[aux][2], wires=[0]) return qml.expval(qml.PauliZ(0)) class Benchmark(bu.BaseBenchmark): \"\"\" This", "benchmark(self, n=8): # n is the number of levels in the primary parameter", "for _ in range(1, 10): # If we had evaluate(i % 2) here", "file except in compliance with the License. # You may obtain a copy", "return [create_params(n - 1), create_params(n - 1), np.random.randn()] p = create_params(n) def evaluate(aux):", "auxiliary arguments would change # every time, which would negate most possible speedups.", "number of primary parameters depends exponentially on n. def create_params(n): \"\"\"Recursively builds a", "mutable=True, interface=None) def benchmark(self, n=8): # n is the number of levels in", "n=8): # n is the number of levels in the primary parameter tree.", "def benchmark(self, n=8): # n is the number of levels in the primary", "License for the specific language governing permissions and # limitations under the License.", "VariableRef is built for each primary parameter, and the qfunc re-evaluated. 
In this", "of the primary params are not used in the circuit, hence :meth:`JacobianQNode._construct` should", "Benchmark(bu.BaseBenchmark): \"\"\" This benchmark attempts to measure the efficiency of :meth:`JacobianQNode._construct` for mutable", "used in the circuit, hence :meth:`JacobianQNode._construct` should efficiently figure out that partial derivatives", "to in writing, software # distributed under the License is distributed on an", "built for each primary parameter, and the qfunc re-evaluated. In this test this", "np.allclose(res, np.cos(p[aux][2])) # first evaluation and construction evaluate(0) # evaluate the node several", "time-consuming, but it is only strictly necessary if the auxiliary parameters change. The", "implied. # See the License for the specific language governing permissions and #", "benchmark attempts to measure the efficiency of :meth:`JacobianQNode._construct` for mutable QNodes, using an", "QNode is constructed, a VariableRef is built for each primary parameter, and the", "\"License\"); # you may not use this file except in compliance with the", "very simple, lightweight mutable quantum circuit.\"\"\" qml.RX(p[aux][2], wires=[0]) return qml.expval(qml.PauliZ(0)) class Benchmark(bu.BaseBenchmark): \"\"\"", "speed of this test between different PL commits: * :meth:`BaseQNode._construct` should only reconstruct", "qml.expval(qml.PauliZ(0)) class Benchmark(bu.BaseBenchmark): \"\"\" This benchmark attempts to measure the efficiency of :meth:`JacobianQNode._construct`", "changes or not, the VariableRefs handle it) for _ in range(1, 10): #", "= create_params(n) def evaluate(aux): \"\"\"Evaluates the qnode using the given auxiliary params.\"\"\" res", "lots of primary parameters with a complicated nested structure, but relatively few auxiliary", "the other nodes have two branches and a scalar return [create_params(n - 1),", "for each primary parameter, and the qfunc re-evaluated. In this test this is", "or implied. 
# See the License for the specific language governing permissions and", "should only reconstruct the QNode if the auxiliary params have changed. * Most", "n. def create_params(n): \"\"\"Recursively builds a tree structure with n levels.\"\"\" if n", "and # limitations under the License. \"\"\" Mutable QNode, complicated primary parameters benchmark.", "np.random.randn()] p = create_params(n) def evaluate(aux): \"\"\"Evaluates the qnode using the given auxiliary", "bu def circuit(p, *, aux=0): \"\"\"A very simple, lightweight mutable quantum circuit.\"\"\" qml.RX(p[aux][2],", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "of primary parameters depends exponentially on n. def create_params(n): \"\"\"Recursively builds a tree", "node several times more with a different auxiliary argument # (it does not", "numpy as np import pennylane as qml import benchmark_utils as bu def circuit(p,", "check the result assert np.allclose(res, np.cos(p[aux][2])) # first evaluation and construction evaluate(0) #", "in writing, software # distributed under the License is distributed on an \"AS", "change. The main reasons why there are significant differences in the execution speed", "benchmark_utils as bu def circuit(p, *, aux=0): \"\"\"A very simple, lightweight mutable quantum", "primary parameter tree. # Hence the number of primary parameters depends exponentially on", "# See the License for the specific language governing permissions and # limitations", "Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "strictly necessary if the auxiliary parameters change. The main reasons why there are", "pylint: disable=invalid-name import numpy as np import pennylane as qml import benchmark_utils as", "are always zero. 
\"\"\" name = \"mutable qnode, complicated primary params\" min_wires =", "= self.qnode(p, aux=aux) # check the result assert np.allclose(res, np.cos(p[aux][2])) # first evaluation", "a VariableRef is built for each primary parameter, and the qfunc re-evaluated. In", "of :meth:`JacobianQNode._construct` for mutable QNodes, using an extreme case where the QNode has", "n <= 0: # the leaves are arrays return np.random.randn(2) # the other", "*, aux=0): \"\"\"A very simple, lightweight mutable quantum circuit.\"\"\" qml.RX(p[aux][2], wires=[0]) return qml.expval(qml.PauliZ(0))", "# Hence the number of primary parameters depends exponentially on n. def create_params(n):", "of the primary parameters are actually used in the circuit. When the QNode", "relatively few auxiliary parameters, and only a few of the primary parameters are", "QNodes, using an extreme case where the QNode has lots of primary parameters", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "with a different auxiliary argument # (it does not matter if p changes", "it) for _ in range(1, 10): # If we had evaluate(i % 2)", "leaves are arrays return np.random.randn(2) # the other nodes have two branches and", "the QNode is constructed, a VariableRef is built for each primary parameter, and", "params are not used in the circuit, hence :meth:`JacobianQNode._construct` should efficiently figure out", "structure, but relatively few auxiliary parameters, and only a few of the primary", "[create_params(n - 1), create_params(n - 1), np.random.randn()] p = create_params(n) def evaluate(aux): \"\"\"Evaluates", "* :meth:`BaseQNode._construct` should only reconstruct the QNode if the auxiliary params have changed.", "use this file except in compliance with the License. 
# You may obtain", "more with a different auxiliary argument # (it does not matter if p", "for the specific language governing permissions and # limitations under the License. \"\"\"", ":meth:`JacobianQNode._construct` should efficiently figure out that partial derivatives wrt. them are always zero.", "given auxiliary params.\"\"\" res = self.qnode(p, aux=aux) # check the result assert np.allclose(res,", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "evaluate(aux): \"\"\"Evaluates the qnode using the given auxiliary params.\"\"\" res = self.qnode(p, aux=aux)", "return np.random.randn(2) # the other nodes have two branches and a scalar return", "create_params(n) def evaluate(aux): \"\"\"Evaluates the qnode using the given auxiliary params.\"\"\" res =", "change # every time, which would negate most possible speedups. evaluate(1) return True", "measure the efficiency of :meth:`JacobianQNode._construct` for mutable QNodes, using an extreme case where", "and construction evaluate(0) # evaluate the node several times more with a different", "import benchmark_utils as bu def circuit(p, *, aux=0): \"\"\"A very simple, lightweight mutable", "and a scalar return [create_params(n - 1), create_params(n - 1), np.random.randn()] p =", "2.0 (the \"License\"); # you may not use this file except in compliance", "efficiency of :meth:`JacobianQNode._construct` for mutable QNodes, using an extreme case where the QNode", "number of levels in the primary parameter tree. # Hence the number of", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", "In this test this is meant to be time-consuming, but it is only", ":meth:`JacobianQNode._construct` for mutable QNodes, using an extreme case where the QNode has lots", "at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in", "this test this is meant to be time-consuming, but it is only strictly", "has lots of primary parameters with a complicated nested structure, but relatively few", "express or implied. # See the License for the specific language governing permissions", "the primary parameters are actually used in the circuit. When the QNode is", "using the given auxiliary params.\"\"\" res = self.qnode(p, aux=aux) # check the result", "np.cos(p[aux][2])) # first evaluation and construction evaluate(0) # evaluate the node several times", "pennylane as qml import benchmark_utils as bu def circuit(p, *, aux=0): \"\"\"A very", "None def setup(self): self.qnode = bu.create_qnode(circuit, self.device, mutable=True, interface=None) def benchmark(self, n=8): #", "bu.create_qnode(circuit, self.device, mutable=True, interface=None) def benchmark(self, n=8): # n is the number of", "if n <= 0: # the leaves are arrays return np.random.randn(2) # the", "levels.\"\"\" if n <= 0: # the leaves are arrays return np.random.randn(2) #", "= \"mutable qnode, complicated primary params\" min_wires = 1 n_vals = range(6, 13,", "either express or implied. # See the License for the specific language governing", "branches and a scalar return [create_params(n - 1), create_params(n - 1), np.random.randn()] p", "quantum circuit.\"\"\" qml.RX(p[aux][2], wires=[0]) return qml.expval(qml.PauliZ(0)) class Benchmark(bu.BaseBenchmark): \"\"\" This benchmark attempts to", "primary parameters depends exponentially on n. 
def create_params(n): \"\"\"Recursively builds a tree structure", "the execution speed of this test between different PL commits: * :meth:`BaseQNode._construct` should", "p changes or not, the VariableRefs handle it) for _ in range(1, 10):", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "to measure the efficiency of :meth:`JacobianQNode._construct` for mutable QNodes, using an extreme case", "evaluation and construction evaluate(0) # evaluate the node several times more with a", "have two branches and a scalar return [create_params(n - 1), create_params(n - 1),", "License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0", "\"\"\"Recursively builds a tree structure with n levels.\"\"\" if n <= 0: #", "auxiliary params.\"\"\" res = self.qnode(p, aux=aux) # check the result assert np.allclose(res, np.cos(p[aux][2]))", "params.\"\"\" res = self.qnode(p, aux=aux) # check the result assert np.allclose(res, np.cos(p[aux][2])) #", "qnode, complicated primary params\" min_wires = 1 n_vals = range(6, 13, 1) def", "as qml import benchmark_utils as bu def circuit(p, *, aux=0): \"\"\"A very simple,", "- 1), create_params(n - 1), np.random.randn()] p = create_params(n) def evaluate(aux): \"\"\"Evaluates the", "mutable QNodes, using an extreme case where the QNode has lots of primary", "the License. # You may obtain a copy of the License at #", "Inc. # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "on n. def create_params(n): \"\"\"Recursively builds a tree structure with n levels.\"\"\" if", "parameters benchmark. \"\"\" # pylint: disable=invalid-name import numpy as np import pennylane as", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "QNode, complicated primary parameters benchmark. 
\"\"\" # pylint: disable=invalid-name import numpy as np", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "* Most of the primary params are not used in the circuit, hence", "Hence the number of primary parameters depends exponentially on n. def create_params(n): \"\"\"Recursively", "(it does not matter if p changes or not, the VariableRefs handle it)", "few auxiliary parameters, and only a few of the primary parameters are actually", "times more with a different auxiliary argument # (it does not matter if", "You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless", "if the auxiliary parameters change. The main reasons why there are significant differences", "auxiliary argument # (it does not matter if p changes or not, the", "circuit(p, *, aux=0): \"\"\"A very simple, lightweight mutable quantum circuit.\"\"\" qml.RX(p[aux][2], wires=[0]) return", "the given auxiliary params.\"\"\" res = self.qnode(p, aux=aux) # check the result assert", "# the other nodes have two branches and a scalar return [create_params(n -", "with the License. # You may obtain a copy of the License at", "the primary parameter tree. # Hence the number of primary parameters depends exponentially", "for mutable QNodes, using an extreme case where the QNode has lots of", "and only a few of the primary parameters are actually used in the", "If we had evaluate(i % 2) here instead the auxiliary arguments would change", "the QNode has lots of primary parameters with a complicated nested structure, but", "the number of primary parameters depends exponentially on n. def create_params(n): \"\"\"Recursively builds", "the efficiency of :meth:`JacobianQNode._construct` for mutable QNodes, using an extreme case where the", "where the QNode has lots of primary parameters with a complicated nested structure,", "parameter, and the qfunc re-evaluated. 
In this test this is meant to be", "tree structure with n levels.\"\"\" if n <= 0: # the leaves are", "only reconstruct the QNode if the auxiliary params have changed. * Most of", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "the specific language governing permissions and # limitations under the License. \"\"\" Mutable", "partial derivatives wrt. them are always zero. \"\"\" name = \"mutable qnode, complicated", "wrt. them are always zero. \"\"\" name = \"mutable qnode, complicated primary params\"", "evaluate the node several times more with a different auxiliary argument # (it", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "result assert np.allclose(res, np.cos(p[aux][2])) # first evaluation and construction evaluate(0) # evaluate the", "self.device, mutable=True, interface=None) def benchmark(self, n=8): # n is the number of levels", "- 1), np.random.randn()] p = create_params(n) def evaluate(aux): \"\"\"Evaluates the qnode using the", "the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed", "meant to be time-consuming, but it is only strictly necessary if the auxiliary", "self.qnode = bu.create_qnode(circuit, self.device, mutable=True, interface=None) def benchmark(self, n=8): # n is the", "def evaluate(aux): \"\"\"Evaluates the qnode using the given auxiliary params.\"\"\" res = self.qnode(p,", "parameters are actually used in the circuit. 
When the QNode is constructed, a", "wires=[0]) return qml.expval(qml.PauliZ(0)) class Benchmark(bu.BaseBenchmark): \"\"\" This benchmark attempts to measure the efficiency", "here instead the auxiliary arguments would change # every time, which would negate", "# If we had evaluate(i % 2) here instead the auxiliary arguments would", "http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software", "zero. \"\"\" name = \"mutable qnode, complicated primary params\" min_wires = 1 n_vals", "primary parameter, and the qfunc re-evaluated. In this test this is meant to", "differences in the execution speed of this test between different PL commits: *", "in compliance with the License. # You may obtain a copy of the", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "\"\"\" # pylint: disable=invalid-name import numpy as np import pennylane as qml import", "n_vals = range(6, 13, 1) def __init__(self, device=None, verbose=False): super().__init__(device, verbose) self.qnode =", "instead the auxiliary arguments would change # every time, which would negate most", "or not, the VariableRefs handle it) for _ in range(1, 10): # If", "limitations under the License. \"\"\" Mutable QNode, complicated primary parameters benchmark. 
\"\"\" #", "See the License for the specific language governing permissions and # limitations under", "the VariableRefs handle it) for _ in range(1, 10): # If we had", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "handle it) for _ in range(1, 10): # If we had evaluate(i %", "of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or", "why there are significant differences in the execution speed of this test between", "interface=None) def benchmark(self, n=8): # n is the number of levels in the", "aux=aux) # check the result assert np.allclose(res, np.cos(p[aux][2])) # first evaluation and construction", "the node several times more with a different auxiliary argument # (it does", "if the auxiliary params have changed. * Most of the primary params are", "that partial derivatives wrt. them are always zero. \"\"\" name = \"mutable qnode,", "self.qnode(p, aux=aux) # check the result assert np.allclose(res, np.cos(p[aux][2])) # first evaluation and", "\"\"\" name = \"mutable qnode, complicated primary params\" min_wires = 1 n_vals =", "qnode using the given auxiliary params.\"\"\" res = self.qnode(p, aux=aux) # check the", "obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by", "are actually used in the circuit. When the QNode is constructed, a VariableRef", ":meth:`BaseQNode._construct` should only reconstruct the QNode if the auxiliary params have changed. *", "verbose=False): super().__init__(device, verbose) self.qnode = None def setup(self): self.qnode = bu.create_qnode(circuit, self.device, mutable=True,", "extreme case where the QNode has lots of primary parameters with a complicated", "complicated nested structure, but relatively few auxiliary parameters, and only a few of", "Quantum Technologies Inc. 
# Licensed under the Apache License, Version 2.0 (the \"License\");", "several times more with a different auxiliary argument # (it does not matter", "= range(6, 13, 1) def __init__(self, device=None, verbose=False): super().__init__(device, verbose) self.qnode = None", "evaluate(0) # evaluate the node several times more with a different auxiliary argument", "a scalar return [create_params(n - 1), create_params(n - 1), np.random.randn()] p = create_params(n)", "are not used in the circuit, hence :meth:`JacobianQNode._construct` should efficiently figure out that", "2) here instead the auxiliary arguments would change # every time, which would", "the auxiliary arguments would change # every time, which would negate most possible", "License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to", "is meant to be time-consuming, but it is only strictly necessary if the", "test this is meant to be time-consuming, but it is only strictly necessary", "create_params(n - 1), np.random.randn()] p = create_params(n) def evaluate(aux): \"\"\"Evaluates the qnode using", "10): # If we had evaluate(i % 2) here instead the auxiliary arguments", "Version 2.0 (the \"License\"); # you may not use this file except in", "import numpy as np import pennylane as qml import benchmark_utils as bu def", "a few of the primary parameters are actually used in the circuit. When", "except in compliance with the License. # You may obtain a copy of", "_ in range(1, 10): # If we had evaluate(i % 2) here instead", "are significant differences in the execution speed of this test between different PL", "1) def __init__(self, device=None, verbose=False): super().__init__(device, verbose) self.qnode = None def setup(self): self.qnode", "qml.RX(p[aux][2], wires=[0]) return qml.expval(qml.PauliZ(0)) class Benchmark(bu.BaseBenchmark): \"\"\" This benchmark attempts to measure the", "parameter tree. 
# Hence the number of primary parameters depends exponentially on n.", "does not matter if p changes or not, the VariableRefs handle it) for", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "# You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 #", "n levels.\"\"\" if n <= 0: # the leaves are arrays return np.random.randn(2)", "create_params(n): \"\"\"Recursively builds a tree structure with n levels.\"\"\" if n <= 0:", "may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required", "p = create_params(n) def evaluate(aux): \"\"\"Evaluates the qnode using the given auxiliary params.\"\"\"", "with a complicated nested structure, but relatively few auxiliary parameters, and only a", "there are significant differences in the execution speed of this test between different", "the primary params are not used in the circuit, hence :meth:`JacobianQNode._construct` should efficiently", "def __init__(self, device=None, verbose=False): super().__init__(device, verbose) self.qnode = None def setup(self): self.qnode =", "parameters, and only a few of the primary parameters are actually used in", "is only strictly necessary if the auxiliary parameters change. The main reasons why", "This benchmark attempts to measure the efficiency of :meth:`JacobianQNode._construct` for mutable QNodes, using", "def setup(self): self.qnode = bu.create_qnode(circuit, self.device, mutable=True, interface=None) def benchmark(self, n=8): # n", "the leaves are arrays return np.random.randn(2) # the other nodes have two branches", "different auxiliary argument # (it does not matter if p changes or not,", "parameters depends exponentially on n. 
def create_params(n): \"\"\"Recursively builds a tree structure with", "lightweight mutable quantum circuit.\"\"\" qml.RX(p[aux][2], wires=[0]) return qml.expval(qml.PauliZ(0)) class Benchmark(bu.BaseBenchmark): \"\"\" This benchmark", "in range(1, 10): # If we had evaluate(i % 2) here instead the", "primary parameters are actually used in the circuit. When the QNode is constructed,", "arguments would change # every time, which would negate most possible speedups. evaluate(1)", "efficiently figure out that partial derivatives wrt. them are always zero. \"\"\" name", "Mutable QNode, complicated primary parameters benchmark. \"\"\" # pylint: disable=invalid-name import numpy as", "% 2) here instead the auxiliary arguments would change # every time, which", "complicated primary parameters benchmark. \"\"\" # pylint: disable=invalid-name import numpy as np import", "the QNode if the auxiliary params have changed. * Most of the primary", "a tree structure with n levels.\"\"\" if n <= 0: # the leaves", "evaluate(i % 2) here instead the auxiliary arguments would change # every time,", "super().__init__(device, verbose) self.qnode = None def setup(self): self.qnode = bu.create_qnode(circuit, self.device, mutable=True, interface=None)", "\"\"\" Mutable QNode, complicated primary parameters benchmark. \"\"\" # pylint: disable=invalid-name import numpy", "import pennylane as qml import benchmark_utils as bu def circuit(p, *, aux=0): \"\"\"A", "np.random.randn(2) # the other nodes have two branches and a scalar return [create_params(n", "QNode if the auxiliary params have changed. * Most of the primary params", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "# Copyright 2018-2020 Xanadu Quantum Technologies Inc. 
# Licensed under the Apache License,", "The main reasons why there are significant differences in the execution speed of", "1), create_params(n - 1), np.random.randn()] p = create_params(n) def evaluate(aux): \"\"\"Evaluates the qnode", "= 1 n_vals = range(6, 13, 1) def __init__(self, device=None, verbose=False): super().__init__(device, verbose)", "if p changes or not, the VariableRefs handle it) for _ in range(1,", "# limitations under the License. \"\"\" Mutable QNode, complicated primary parameters benchmark. \"\"\"", "it is only strictly necessary if the auxiliary parameters change. The main reasons" ]
[ "deleted. \"\"\" def __init__(self, queryset): self._cache = [obj for obj in queryset.all()] def", "cache relationships to objects that have been deleted. \"\"\" def __init__(self, queryset): self._cache", "\"\"\" def __init__(self, queryset): self._cache = [obj for obj in queryset.all()] def all(self):", "\"\"\" A fake QuerySet that can be used to cache relationships to objects", "relationships to objects that have been deleted. \"\"\" def __init__(self, queryset): self._cache =", "def __init__(self, queryset): self._cache = [obj for obj in queryset.all()] def all(self): return", "DummyQuerySet: \"\"\" A fake QuerySet that can be used to cache relationships to", "can be used to cache relationships to objects that have been deleted. \"\"\"", "A fake QuerySet that can be used to cache relationships to objects that", "__init__(self, queryset): self._cache = [obj for obj in queryset.all()] def all(self): return self._cache", "been deleted. \"\"\" def __init__(self, queryset): self._cache = [obj for obj in queryset.all()]", "used to cache relationships to objects that have been deleted. \"\"\" def __init__(self,", "to objects that have been deleted. \"\"\" def __init__(self, queryset): self._cache = [obj", "class DummyQuerySet: \"\"\" A fake QuerySet that can be used to cache relationships", "QuerySet that can be used to cache relationships to objects that have been", "fake QuerySet that can be used to cache relationships to objects that have", "to cache relationships to objects that have been deleted. \"\"\" def __init__(self, queryset):", "have been deleted. \"\"\" def __init__(self, queryset): self._cache = [obj for obj in", "objects that have been deleted. \"\"\" def __init__(self, queryset): self._cache = [obj for", "that can be used to cache relationships to objects that have been deleted.", "that have been deleted. \"\"\" def __init__(self, queryset): self._cache = [obj for obj", "be used to cache relationships to objects that have been deleted. \"\"\" def" ]
[ "= renderer_context or {} request = renderer_context.get(\"request\", None) callback = request.query_params.get(\"callback\", \"callback\") json", "\"application/javascript\" def render(self, data, accepted_media_type=None, renderer_context=None): renderer_context = renderer_context or {} request =", "\"\") # user center check username password response = Response({\"user\": \"user_info\", \"token\": token})", "{ \"token\": token, \"host\": request.get_host(), } response = Response(cookies) return response @api_view([\"POST\"]) @authentication_classes((AnyAuthentication,))", "jsonp render \"\"\" media_type = \"application/javascript\" def render(self, data, accepted_media_type=None, renderer_context=None): renderer_context =", "response.set_cookie(\"auth\", token, domain=\"0.0.0.0\", expires=30 * 24 * 60 * 60) return response @api_view([\"GET\"])", "( api_view, permission_classes, authentication_classes, renderer_classes, ) from rest_framework.permissions import AllowAny from rest_framework.response import", "... data = {\"user_info\": {\"username\": \"admin\", \"user_id\": 1}, \"token\": token} return Response(data) mock_urls", "data, accepted_media_type, renderer_context ) return callback.encode(\"utf-8\") + b\"(\" + json + b\");\" @api_view([\"GET\"])", "return callback.encode(\"utf-8\") + b\"(\" + json + b\");\" @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) @renderer_classes( (JSONPRenderer,),", "# user center check token ... 
data = {\"user_info\": {\"username\": \"admin\", \"user_id\": 1},", "return response @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def check_token(request, token): token = request.COOKIES.get(\"auth\") # user", ") return callback.encode(\"utf-8\") + b\"(\" + json + b\");\" @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) @renderer_classes(", "from rest_framework.permissions import AllowAny from rest_framework.response import Response from rest_framework.authentication import BaseAuthentication from", "center check username password response = Response({\"user\": \"user_info\", \"token\": token}) response.set_cookie(\"auth\", token, domain=\"0.0.0.0\",", "response = Response(cookies) return response @api_view([\"POST\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def login(request): token = request.COOKIES.get(\"auth\",", "password response = Response({\"user\": \"user_info\", \"token\": token}) response.set_cookie(\"auth\", token, domain=\"0.0.0.0\", expires=30 * 24", "\"token\": token}) response.set_cookie(\"auth\", token, domain=\"0.0.0.0\", expires=30 * 24 * 60 * 60) return", "token} return Response(data) mock_urls = [ url(\"^jsonp/\", jsonp), url(\"^login/\", login), url(r\"^check_token/(?P<token>[A-Za-z0-9]+)/$\", check_token), ]", "\"auth\") password = request.data.get(\"password\", \"\") username = request.data.get(\"username\", \"\") # user center check", "JSONRenderer from django.conf.urls import url class AnyAuthentication(BaseAuthentication): def authenticate(self, request): return class JSONPRenderer(JSONRenderer):", "import JSONRenderer from django.conf.urls import url class AnyAuthentication(BaseAuthentication): def authenticate(self, request): return class", "login(request): token = request.COOKIES.get(\"auth\", \"auth\") password = request.data.get(\"password\", \"\") username = request.data.get(\"username\", 
\"\")", "media_type = \"application/javascript\" def render(self, data, accepted_media_type=None, renderer_context=None): renderer_context = renderer_context or {}", "{\"username\": \"admin\", \"user_id\": 1}, \"token\": token} return Response(data) mock_urls = [ url(\"^jsonp/\", jsonp),", "renderer_context or {} request = renderer_context.get(\"request\", None) callback = request.query_params.get(\"callback\", \"callback\") json =", "token}) response.set_cookie(\"auth\", token, domain=\"0.0.0.0\", expires=30 * 24 * 60 * 60) return response", "request.COOKIES.get(\"auth\", \"auth\") password = request.data.get(\"password\", \"\") username = request.data.get(\"username\", \"\") # user center", "data, accepted_media_type=None, renderer_context=None): renderer_context = renderer_context or {} request = renderer_context.get(\"request\", None) callback", "or {} request = renderer_context.get(\"request\", None) callback = request.query_params.get(\"callback\", \"callback\") json = super(JSONPRenderer,", "Response(cookies) return response @api_view([\"POST\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def login(request): token = request.COOKIES.get(\"auth\", \"auth\") password", "= Response({\"user\": \"user_info\", \"token\": token}) response.set_cookie(\"auth\", token, domain=\"0.0.0.0\", expires=30 * 24 * 60", "token = request.COOKIES.get(\"auth\") # user center check token ... 
data = {\"user_info\": {\"username\":", "django.conf.urls import url class AnyAuthentication(BaseAuthentication): def authenticate(self, request): return class JSONPRenderer(JSONRenderer): \"\"\" jsonp", "accepted_media_type, renderer_context ) return callback.encode(\"utf-8\") + b\"(\" + json + b\");\" @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,))", "# user center check username password response = Response({\"user\": \"user_info\", \"token\": token}) response.set_cookie(\"auth\",", "data = {\"user_info\": {\"username\": \"admin\", \"user_id\": 1}, \"token\": token} return Response(data) mock_urls =", "from django.conf.urls import url class AnyAuthentication(BaseAuthentication): def authenticate(self, request): return class JSONPRenderer(JSONRenderer): \"\"\"", "request = renderer_context.get(\"request\", None) callback = request.query_params.get(\"callback\", \"callback\") json = super(JSONPRenderer, self).render( data,", "super(JSONPRenderer, self).render( data, accepted_media_type, renderer_context ) return callback.encode(\"utf-8\") + b\"(\" + json +", "request.data.get(\"password\", \"\") username = request.data.get(\"username\", \"\") # user center check username password response", "= request.data.get(\"username\", \"\") # user center check username password response = Response({\"user\": \"user_info\",", "= request.COOKIES.get(\"auth\", \"\") cookies = { \"token\": token, \"host\": request.get_host(), } response =", "cookies = { \"token\": token, \"host\": request.get_host(), } response = Response(cookies) return response", "@permission_classes((AllowAny,)) def check_token(request, token): token = request.COOKIES.get(\"auth\") # user center check token ...", "Response from rest_framework.authentication import BaseAuthentication from rest_framework.renderers import JSONRenderer from django.conf.urls import url", "= renderer_context.get(\"request\", None) callback = request.query_params.get(\"callback\", \"callback\") json = 
super(JSONPRenderer, self).render( data, accepted_media_type,", "permission_classes, authentication_classes, renderer_classes, ) from rest_framework.permissions import AllowAny from rest_framework.response import Response from", "(JSONPRenderer,), ) def jsonp(request): token = request.COOKIES.get(\"auth\", \"\") cookies = { \"token\": token,", "request.COOKIES.get(\"auth\") # user center check token ... data = {\"user_info\": {\"username\": \"admin\", \"user_id\":", "} response = Response(cookies) return response @api_view([\"POST\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def login(request): token =", "renderer_context=None): renderer_context = renderer_context or {} request = renderer_context.get(\"request\", None) callback = request.query_params.get(\"callback\",", "password = request.data.get(\"password\", \"\") username = request.data.get(\"username\", \"\") # user center check username", "Response({\"user\": \"user_info\", \"token\": token}) response.set_cookie(\"auth\", token, domain=\"0.0.0.0\", expires=30 * 24 * 60 *", "\"\") username = request.data.get(\"username\", \"\") # user center check username password response =", "= \"application/javascript\" def render(self, data, accepted_media_type=None, renderer_context=None): renderer_context = renderer_context or {} request", ") def jsonp(request): token = request.COOKIES.get(\"auth\", \"\") cookies = { \"token\": token, \"host\":", "= request.COOKIES.get(\"auth\", \"auth\") password = request.data.get(\"password\", \"\") username = request.data.get(\"username\", \"\") # user", "expires=30 * 24 * 60 * 60) return response @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def", "from rest_framework.authentication import BaseAuthentication from rest_framework.renderers import JSONRenderer from django.conf.urls import url class", "renderer_classes, ) from rest_framework.permissions import AllowAny from 
rest_framework.response import Response from rest_framework.authentication import", "class AnyAuthentication(BaseAuthentication): def authenticate(self, request): return class JSONPRenderer(JSONRenderer): \"\"\" jsonp render \"\"\" media_type", "{} request = renderer_context.get(\"request\", None) callback = request.query_params.get(\"callback\", \"callback\") json = super(JSONPRenderer, self).render(", "user center check token ... data = {\"user_info\": {\"username\": \"admin\", \"user_id\": 1}, \"token\":", "rest_framework.response import Response from rest_framework.authentication import BaseAuthentication from rest_framework.renderers import JSONRenderer from django.conf.urls", "renderer_context ) return callback.encode(\"utf-8\") + b\"(\" + json + b\");\" @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,))", "user center check username password response = Response({\"user\": \"user_info\", \"token\": token}) response.set_cookie(\"auth\", token,", "24 * 60 * 60) return response @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def check_token(request, token):", "render(self, data, accepted_media_type=None, renderer_context=None): renderer_context = renderer_context or {} request = renderer_context.get(\"request\", None)", "request.query_params.get(\"callback\", \"callback\") json = super(JSONPRenderer, self).render( data, accepted_media_type, renderer_context ) return callback.encode(\"utf-8\") +", "rest_framework.decorators import ( api_view, permission_classes, authentication_classes, renderer_classes, ) from rest_framework.permissions import AllowAny from", "rest_framework.renderers import JSONRenderer from django.conf.urls import url class AnyAuthentication(BaseAuthentication): def authenticate(self, request): return", "token = request.COOKIES.get(\"auth\", \"auth\") password = request.data.get(\"password\", \"\") username = request.data.get(\"username\", \"\") 
#", "{\"user_info\": {\"username\": \"admin\", \"user_id\": 1}, \"token\": token} return Response(data) mock_urls = [ url(\"^jsonp/\",", "@permission_classes((AllowAny,)) def login(request): token = request.COOKIES.get(\"auth\", \"auth\") password = request.data.get(\"password\", \"\") username =", "@api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def check_token(request, token): token = request.COOKIES.get(\"auth\") # user center check", "1}, \"token\": token} return Response(data) mock_urls = [ url(\"^jsonp/\", jsonp), url(\"^login/\", login), url(r\"^check_token/(?P<token>[A-Za-z0-9]+)/$\",", "+ json + b\");\" @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) @renderer_classes( (JSONPRenderer,), ) def jsonp(request): token", "AnyAuthentication(BaseAuthentication): def authenticate(self, request): return class JSONPRenderer(JSONRenderer): \"\"\" jsonp render \"\"\" media_type =", "import Response from rest_framework.authentication import BaseAuthentication from rest_framework.renderers import JSONRenderer from django.conf.urls import", "response = Response({\"user\": \"user_info\", \"token\": token}) response.set_cookie(\"auth\", token, domain=\"0.0.0.0\", expires=30 * 24 *", "\"\"\" media_type = \"application/javascript\" def render(self, data, accepted_media_type=None, renderer_context=None): renderer_context = renderer_context or", "username password response = Response({\"user\": \"user_info\", \"token\": token}) response.set_cookie(\"auth\", token, domain=\"0.0.0.0\", expires=30 *", "check_token(request, token): token = request.COOKIES.get(\"auth\") # user center check token ... 
data =", "renderer_context = renderer_context or {} request = renderer_context.get(\"request\", None) callback = request.query_params.get(\"callback\", \"callback\")", "b\");\" @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) @renderer_classes( (JSONPRenderer,), ) def jsonp(request): token = request.COOKIES.get(\"auth\", \"\")", "* 24 * 60 * 60) return response @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def check_token(request,", "import AllowAny from rest_framework.response import Response from rest_framework.authentication import BaseAuthentication from rest_framework.renderers import", "@renderer_classes( (JSONPRenderer,), ) def jsonp(request): token = request.COOKIES.get(\"auth\", \"\") cookies = { \"token\":", "token, \"host\": request.get_host(), } response = Response(cookies) return response @api_view([\"POST\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def", "callback.encode(\"utf-8\") + b\"(\" + json + b\");\" @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) @renderer_classes( (JSONPRenderer,), )", "renderer_context.get(\"request\", None) callback = request.query_params.get(\"callback\", \"callback\") json = super(JSONPRenderer, self).render( data, accepted_media_type, renderer_context", "+ b\"(\" + json + b\");\" @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) @renderer_classes( (JSONPRenderer,), ) def", "rest_framework.authentication import BaseAuthentication from rest_framework.renderers import JSONRenderer from django.conf.urls import url class AnyAuthentication(BaseAuthentication):", "jsonp(request): token = request.COOKIES.get(\"auth\", \"\") cookies = { \"token\": token, \"host\": request.get_host(), }", "\"token\": token, \"host\": request.get_host(), } response = Response(cookies) return response 
@api_view([\"POST\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,))", "token = request.COOKIES.get(\"auth\", \"\") cookies = { \"token\": token, \"host\": request.get_host(), } response", "import url class AnyAuthentication(BaseAuthentication): def authenticate(self, request): return class JSONPRenderer(JSONRenderer): \"\"\" jsonp render", "b\"(\" + json + b\");\" @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) @renderer_classes( (JSONPRenderer,), ) def jsonp(request):", "username = request.data.get(\"username\", \"\") # user center check username password response = Response({\"user\":", "check username password response = Response({\"user\": \"user_info\", \"token\": token}) response.set_cookie(\"auth\", token, domain=\"0.0.0.0\", expires=30", "return response @api_view([\"POST\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def login(request): token = request.COOKIES.get(\"auth\", \"auth\") password =", "* 60) return response @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def check_token(request, token): token = request.COOKIES.get(\"auth\")", "from rest_framework.renderers import JSONRenderer from django.conf.urls import url class AnyAuthentication(BaseAuthentication): def authenticate(self, request):", "class JSONPRenderer(JSONRenderer): \"\"\" jsonp render \"\"\" media_type = \"application/javascript\" def render(self, data, accepted_media_type=None,", "+ b\");\" @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) @renderer_classes( (JSONPRenderer,), ) def jsonp(request): token = request.COOKIES.get(\"auth\",", "\"\"\" jsonp render \"\"\" media_type = \"application/javascript\" def render(self, data, accepted_media_type=None, renderer_context=None): renderer_context", "self).render( data, accepted_media_type, renderer_context ) return 
callback.encode(\"utf-8\") + b\"(\" + json + b\");\"", "= { \"token\": token, \"host\": request.get_host(), } response = Response(cookies) return response @api_view([\"POST\"])", "request.data.get(\"username\", \"\") # user center check username password response = Response({\"user\": \"user_info\", \"token\":", "@api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) @renderer_classes( (JSONPRenderer,), ) def jsonp(request): token = request.COOKIES.get(\"auth\", \"\") cookies", "= {\"user_info\": {\"username\": \"admin\", \"user_id\": 1}, \"token\": token} return Response(data) mock_urls = [", "token): token = request.COOKIES.get(\"auth\") # user center check token ... data = {\"user_info\":", "= request.data.get(\"password\", \"\") username = request.data.get(\"username\", \"\") # user center check username password", "= request.COOKIES.get(\"auth\") # user center check token ... data = {\"user_info\": {\"username\": \"admin\",", "@permission_classes((AllowAny,)) @renderer_classes( (JSONPRenderer,), ) def jsonp(request): token = request.COOKIES.get(\"auth\", \"\") cookies = {", ") from rest_framework.permissions import AllowAny from rest_framework.response import Response from rest_framework.authentication import BaseAuthentication", "authenticate(self, request): return class JSONPRenderer(JSONRenderer): \"\"\" jsonp render \"\"\" media_type = \"application/javascript\" def", "* 60 * 60) return response @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def check_token(request, token): token", "api_view, permission_classes, authentication_classes, renderer_classes, ) from rest_framework.permissions import AllowAny from rest_framework.response import Response", "@authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def login(request): token = request.COOKIES.get(\"auth\", \"auth\") password = request.data.get(\"password\", \"\") username", "= 
request.query_params.get(\"callback\", \"callback\") json = super(JSONPRenderer, self).render( data, accepted_media_type, renderer_context ) return callback.encode(\"utf-8\")", "accepted_media_type=None, renderer_context=None): renderer_context = renderer_context or {} request = renderer_context.get(\"request\", None) callback =", "request.get_host(), } response = Response(cookies) return response @api_view([\"POST\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def login(request): token", "\"host\": request.get_host(), } response = Response(cookies) return response @api_view([\"POST\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def login(request):", "authentication_classes, renderer_classes, ) from rest_framework.permissions import AllowAny from rest_framework.response import Response from rest_framework.authentication", "\"user_id\": 1}, \"token\": token} return Response(data) mock_urls = [ url(\"^jsonp/\", jsonp), url(\"^login/\", login),", "60) return response @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def check_token(request, token): token = request.COOKIES.get(\"auth\") #", "json = super(JSONPRenderer, self).render( data, accepted_media_type, renderer_context ) return callback.encode(\"utf-8\") + b\"(\" +", "\"callback\") json = super(JSONPRenderer, self).render( data, accepted_media_type, renderer_context ) return callback.encode(\"utf-8\") + b\"(\"", "url class AnyAuthentication(BaseAuthentication): def authenticate(self, request): return class JSONPRenderer(JSONRenderer): \"\"\" jsonp render \"\"\"", "request.COOKIES.get(\"auth\", \"\") cookies = { \"token\": token, \"host\": request.get_host(), } response = Response(cookies)", "callback = request.query_params.get(\"callback\", \"callback\") json = super(JSONPRenderer, self).render( data, accepted_media_type, renderer_context ) return", "response @api_view([\"GET\"]) 
@authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def check_token(request, token): token = request.COOKIES.get(\"auth\") # user center", "domain=\"0.0.0.0\", expires=30 * 24 * 60 * 60) return response @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,))", "AllowAny from rest_framework.response import Response from rest_framework.authentication import BaseAuthentication from rest_framework.renderers import JSONRenderer", "response @api_view([\"POST\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def login(request): token = request.COOKIES.get(\"auth\", \"auth\") password = request.data.get(\"password\",", "60 * 60) return response @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def check_token(request, token): token =", "def jsonp(request): token = request.COOKIES.get(\"auth\", \"\") cookies = { \"token\": token, \"host\": request.get_host(),", "from rest_framework.response import Response from rest_framework.authentication import BaseAuthentication from rest_framework.renderers import JSONRenderer from", "def check_token(request, token): token = request.COOKIES.get(\"auth\") # user center check token ... data", "center check token ... 
data = {\"user_info\": {\"username\": \"admin\", \"user_id\": 1}, \"token\": token}", "\"user_info\", \"token\": token}) response.set_cookie(\"auth\", token, domain=\"0.0.0.0\", expires=30 * 24 * 60 * 60)", "= super(JSONPRenderer, self).render( data, accepted_media_type, renderer_context ) return callback.encode(\"utf-8\") + b\"(\" + json", "\"\") cookies = { \"token\": token, \"host\": request.get_host(), } response = Response(cookies) return", "@authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) @renderer_classes( (JSONPRenderer,), ) def jsonp(request): token = request.COOKIES.get(\"auth\", \"\") cookies =", "json + b\");\" @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) @renderer_classes( (JSONPRenderer,), ) def jsonp(request): token =", "= Response(cookies) return response @api_view([\"POST\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def login(request): token = request.COOKIES.get(\"auth\", \"auth\")", "import BaseAuthentication from rest_framework.renderers import JSONRenderer from django.conf.urls import url class AnyAuthentication(BaseAuthentication): def", "rest_framework.permissions import AllowAny from rest_framework.response import Response from rest_framework.authentication import BaseAuthentication from rest_framework.renderers", "from rest_framework.decorators import ( api_view, permission_classes, authentication_classes, renderer_classes, ) from rest_framework.permissions import AllowAny", "request): return class JSONPRenderer(JSONRenderer): \"\"\" jsonp render \"\"\" media_type = \"application/javascript\" def render(self,", "None) callback = request.query_params.get(\"callback\", \"callback\") json = super(JSONPRenderer, self).render( data, accepted_media_type, renderer_context )", "render \"\"\" media_type = \"application/javascript\" def render(self, data, accepted_media_type=None, renderer_context=None): renderer_context = 
renderer_context", "token ... data = {\"user_info\": {\"username\": \"admin\", \"user_id\": 1}, \"token\": token} return Response(data)", "check token ... data = {\"user_info\": {\"username\": \"admin\", \"user_id\": 1}, \"token\": token} return", "\"admin\", \"user_id\": 1}, \"token\": token} return Response(data) mock_urls = [ url(\"^jsonp/\", jsonp), url(\"^login/\",", "@api_view([\"POST\"]) @authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def login(request): token = request.COOKIES.get(\"auth\", \"auth\") password = request.data.get(\"password\", \"\")", "def login(request): token = request.COOKIES.get(\"auth\", \"auth\") password = request.data.get(\"password\", \"\") username = request.data.get(\"username\",", "@authentication_classes((AnyAuthentication,)) @permission_classes((AllowAny,)) def check_token(request, token): token = request.COOKIES.get(\"auth\") # user center check token", "return class JSONPRenderer(JSONRenderer): \"\"\" jsonp render \"\"\" media_type = \"application/javascript\" def render(self, data,", "token, domain=\"0.0.0.0\", expires=30 * 24 * 60 * 60) return response @api_view([\"GET\"]) @authentication_classes((AnyAuthentication,))", "import ( api_view, permission_classes, authentication_classes, renderer_classes, ) from rest_framework.permissions import AllowAny from rest_framework.response", "def authenticate(self, request): return class JSONPRenderer(JSONRenderer): \"\"\" jsonp render \"\"\" media_type = \"application/javascript\"", "def render(self, data, accepted_media_type=None, renderer_context=None): renderer_context = renderer_context or {} request = renderer_context.get(\"request\",", "\"token\": token} return Response(data) mock_urls = [ url(\"^jsonp/\", jsonp), url(\"^login/\", login), url(r\"^check_token/(?P<token>[A-Za-z0-9]+)/$\", check_token),", "JSONPRenderer(JSONRenderer): \"\"\" jsonp render \"\"\" media_type = \"application/javascript\" def render(self, data, accepted_media_type=None, 
renderer_context=None):", "BaseAuthentication from rest_framework.renderers import JSONRenderer from django.conf.urls import url class AnyAuthentication(BaseAuthentication): def authenticate(self," ]
[ "image.search_by_title(search_term) message = f\"{search_term}\" return render(request, 'search.html',{\"message\":message,\"picture\": searched_images}) else: message = \"You haven't", "django.shortcuts import render, redirect from django.http import HttpResponse, Http404, HttpResponseRedirect import datetime as", "# Create your views here. def welcome(request): return render(request, 'image.html') def image(request, id):", "here. def welcome(request): return render(request, 'image.html') def image(request, id): try: image = Image.objects.get(pk", "views here. def welcome(request): return render(request, 'image.html') def image(request, id): try: image =", "<gh_stars>0 from django.shortcuts import render, redirect from django.http import HttpResponse, Http404, HttpResponseRedirect import", "Http404() return render(request, 'images.html', {\"image\": image}) def search_results(request): if 'image' in request.GET and", "HttpResponse, Http404, HttpResponseRedirect import datetime as dt # Create your views here. 
def", "id): try: image = Image.objects.get(pk = id) except DoesNotExist: raise Http404() return render(request,", "search_results(request): if 'image' in request.GET and request.GET[\"image\"]: search_term = request.GET.get(\"image\") searched_images = image.search_by_title(search_term)", "DoesNotExist: raise Http404() return render(request, 'images.html', {\"image\": image}) def search_results(request): if 'image' in", "render, redirect from django.http import HttpResponse, Http404, HttpResponseRedirect import datetime as dt #", "'search.html',{\"message\":message,\"picture\": searched_images}) else: message = \"You haven't searched for any term\" return render(request,", "{\"image\": image}) def search_results(request): if 'image' in request.GET and request.GET[\"image\"]: search_term = request.GET.get(\"image\")", "def image(request, id): try: image = Image.objects.get(pk = id) except DoesNotExist: raise Http404()", "= Image.objects.get(pk = id) except DoesNotExist: raise Http404() return render(request, 'images.html', {\"image\": image})", "'image' in request.GET and request.GET[\"image\"]: search_term = request.GET.get(\"image\") searched_images = image.search_by_title(search_term) message =", "in request.GET and request.GET[\"image\"]: search_term = request.GET.get(\"image\") searched_images = image.search_by_title(search_term) message = f\"{search_term}\"", "from django.http import HttpResponse, Http404, HttpResponseRedirect import datetime as dt # Create your", "searched_images}) else: message = \"You haven't searched for any term\" return render(request, 'search.html',{\"message\":message})", "your views here. 
def welcome(request): return render(request, 'image.html') def image(request, id): try: image", "= id) except DoesNotExist: raise Http404() return render(request, 'images.html', {\"image\": image}) def search_results(request):", "import HttpResponse, Http404, HttpResponseRedirect import datetime as dt # Create your views here.", "raise Http404() return render(request, 'images.html', {\"image\": image}) def search_results(request): if 'image' in request.GET", "'images.html', {\"image\": image}) def search_results(request): if 'image' in request.GET and request.GET[\"image\"]: search_term =", "dt # Create your views here. def welcome(request): return render(request, 'image.html') def image(request,", "if 'image' in request.GET and request.GET[\"image\"]: search_term = request.GET.get(\"image\") searched_images = image.search_by_title(search_term) message", "= request.GET.get(\"image\") searched_images = image.search_by_title(search_term) message = f\"{search_term}\" return render(request, 'search.html',{\"message\":message,\"picture\": searched_images}) else:", "from django.shortcuts import render, redirect from django.http import HttpResponse, Http404, HttpResponseRedirect import datetime", "def welcome(request): return render(request, 'image.html') def image(request, id): try: image = Image.objects.get(pk =", "image = Image.objects.get(pk = id) except DoesNotExist: raise Http404() return render(request, 'images.html', {\"image\":", "return render(request, 'image.html') def image(request, id): try: image = Image.objects.get(pk = id) except", "welcome(request): return render(request, 'image.html') def image(request, id): try: image = Image.objects.get(pk = id)", "image(request, id): try: image = Image.objects.get(pk = id) except DoesNotExist: raise Http404() return", "as dt # Create your views here. 
def welcome(request): return render(request, 'image.html') def", "f\"{search_term}\" return render(request, 'search.html',{\"message\":message,\"picture\": searched_images}) else: message = \"You haven't searched for any", "import render, redirect from django.http import HttpResponse, Http404, HttpResponseRedirect import datetime as dt", "import datetime as dt # Create your views here. def welcome(request): return render(request,", "def search_results(request): if 'image' in request.GET and request.GET[\"image\"]: search_term = request.GET.get(\"image\") searched_images =", "searched_images = image.search_by_title(search_term) message = f\"{search_term}\" return render(request, 'search.html',{\"message\":message,\"picture\": searched_images}) else: message =", "except DoesNotExist: raise Http404() return render(request, 'images.html', {\"image\": image}) def search_results(request): if 'image'", "= image.search_by_title(search_term) message = f\"{search_term}\" return render(request, 'search.html',{\"message\":message,\"picture\": searched_images}) else: message = \"You", "request.GET and request.GET[\"image\"]: search_term = request.GET.get(\"image\") searched_images = image.search_by_title(search_term) message = f\"{search_term}\" return", "request.GET[\"image\"]: search_term = request.GET.get(\"image\") searched_images = image.search_by_title(search_term) message = f\"{search_term}\" return render(request, 'search.html',{\"message\":message,\"picture\":", "return render(request, 'search.html',{\"message\":message,\"picture\": searched_images}) else: message = \"You haven't searched for any term\"", "Image.objects.get(pk = id) except DoesNotExist: raise Http404() return render(request, 'images.html', {\"image\": image}) def", "render(request, 'search.html',{\"message\":message,\"picture\": searched_images}) else: message = \"You haven't searched for any term\" return", "and request.GET[\"image\"]: search_term = request.GET.get(\"image\") searched_images = 
image.search_by_title(search_term) message = f\"{search_term}\" return render(request,", "= f\"{search_term}\" return render(request, 'search.html',{\"message\":message,\"picture\": searched_images}) else: message = \"You haven't searched for", "image}) def search_results(request): if 'image' in request.GET and request.GET[\"image\"]: search_term = request.GET.get(\"image\") searched_images", "return render(request, 'images.html', {\"image\": image}) def search_results(request): if 'image' in request.GET and request.GET[\"image\"]:", "Create your views here. def welcome(request): return render(request, 'image.html') def image(request, id): try:", "Http404, HttpResponseRedirect import datetime as dt # Create your views here. def welcome(request):", "render(request, 'images.html', {\"image\": image}) def search_results(request): if 'image' in request.GET and request.GET[\"image\"]: search_term", "'image.html') def image(request, id): try: image = Image.objects.get(pk = id) except DoesNotExist: raise", "try: image = Image.objects.get(pk = id) except DoesNotExist: raise Http404() return render(request, 'images.html',", "id) except DoesNotExist: raise Http404() return render(request, 'images.html', {\"image\": image}) def search_results(request): if", "datetime as dt # Create your views here. 
def welcome(request): return render(request, 'image.html')", "search_term = request.GET.get(\"image\") searched_images = image.search_by_title(search_term) message = f\"{search_term}\" return render(request, 'search.html',{\"message\":message,\"picture\": searched_images})", "message = f\"{search_term}\" return render(request, 'search.html',{\"message\":message,\"picture\": searched_images}) else: message = \"You haven't searched", "request.GET.get(\"image\") searched_images = image.search_by_title(search_term) message = f\"{search_term}\" return render(request, 'search.html',{\"message\":message,\"picture\": searched_images}) else: message", "redirect from django.http import HttpResponse, Http404, HttpResponseRedirect import datetime as dt # Create", "render(request, 'image.html') def image(request, id): try: image = Image.objects.get(pk = id) except DoesNotExist:", "HttpResponseRedirect import datetime as dt # Create your views here. def welcome(request): return", "django.http import HttpResponse, Http404, HttpResponseRedirect import datetime as dt # Create your views" ]
[]
[ "line[start:].rstrip() @property def all(self) -> list: \"\"\"Gets all the examples.\"\"\" return self._examples def", "uncloaked at {self.debug_id} ({line_num})\") self._cloaking = False @property def is_cloaking(self) -> bool: \"\"\"States", "this line of code to the example block examples.add_line(clean_line) examples.end(line_index) return {example.key: example.text", "str) -> None: \"\"\"Initialiser.\"\"\" self._key = (path, line_num, example_name) self._strip = len(line) -", "in config.drop_lines): continue for r_before, r_after in config.replacements.items(): clean_line = clean_line.replace(r_before, r_after) examples.validate_line(config.fail_on_contains,", "capturing code from the next line examples.set_current( Example(path=path, line_num=line_num, example_name=line.rsplit(\":\")[-1].strip(), line=line), line_num )", "None: \"\"\"Start cloaking.\"\"\" if self._current_example: self._current_example.cloak(line_num) def uncloak(self, line_num: int) -> None: \"\"\"Stops", "if not self._current_example.is_empty: self._examples.append(self._current_example) self._current_example = None def cloak(self, line_num: int) -> None:", ") def validate_line(self, fail_on_contains: List[str], line: str, line_num: int) -> None: \"\"\"Validates line.\"\"\"", "config.end_flag in line: # stop capturing, and discard empty blocks examples.store_current(line_num) continue if", "if self._current_example: self._current_example.add_line(line) def validate_dedent(self, line: str, line_num: int) -> None: \"\"\"Validates dedent.\"\"\"", "exceptions.CloakMismatch(f\"Already uncloaked at {self.debug_id} ({line_num})\") self._cloaking = False @property def is_cloaking(self) -> bool:", "config.fail_on_dedent: examples.validate_dedent(line, line_num) clean_line = examples.clean_line(line) if any(match in clean_line for match in", "path: str) -> dict: \"\"\"Finds snippets in lines of text.\"\"\" examples = Examples()", "tuple: \"\"\"Gets the example key.\"\"\" return self._key @property 
def debug_id(self) -> str: \"\"\"Gets", "line_index = line_num if config.start_flag in line: # start capturing code from the", "len(self._text) == 0 @property def text(self) -> List[str]: \"\"\"Gets example text.\"\"\" return self._text", "str) -> None: \"\"\"Adds a line.\"\"\" if self._current_example: self._current_example.add_line(line) def validate_dedent(self, line: str,", "None: \"\"\"Stops cloaking.\"\"\" if self._current_example: self._current_example.uncloak(line_num) def end(self, line_num: int) -> None: \"\"\"Ends.\"\"\"", "trigger in line: debug_info = self._current_example.debug_id if self._current_example else \"\" raise exceptions.ValidationFailure(f\"Unexpected phrase", "if config.start_flag in line: # start capturing code from the next line examples.set_current(", "# add this line of code to the example block examples.add_line(clean_line) examples.end(line_index) return", "dedent whilst capturing {self._current_example.debug_id} ({line_num})\" ) def validate_line(self, fail_on_contains: List[str], line: str, line_num:", "self._examples def extract_snippets_from_text(config: Config, lines: list, path: str) -> dict: \"\"\"Finds snippets in", "enumerate(lines): line_index = line_num if config.start_flag in line: # start capturing code from", "path: str, line_num: int, example_name: str, line: str) -> None: \"\"\"Initialiser.\"\"\" self._key =", "examples.uncloak(line_num) continue if config.cloak_flag in line: examples.cloak(line_num) continue # whilst capturing, append code", "line.\"\"\" if not self._current_example: return line start = self._current_example.strip_number return line[start:].rstrip() @property def", "self._current_example: self._current_example.cloak(line_num) def uncloak(self, line_num: int) -> None: \"\"\"Stops cloaking.\"\"\" if self._current_example: self._current_example.uncloak(line_num)", "# whilst capturing, append code lines to the current block if config.fail_on_dedent: examples.validate_dedent(line,", "cloaking.\"\"\" if 
self._cloaking: raise exceptions.CloakMismatch(f\"Already cloaked at {self.debug_id} ({line_num})\") self._cloaking = True def", "self._current_example: return if any(line[: self._current_example.strip_number].lstrip()): raise exceptions.ValidationFailure( f\"Unexpected dedent whilst capturing {self._current_example.debug_id} ({line_num})\"", "line_num, line in enumerate(lines): line_index = line_num if config.start_flag in line: # start", "= True def uncloak(self, line_num: int) -> None: \"\"\"Stops cloaking.\"\"\" if not self._cloaking:", "text.\"\"\" return self._text @property def strip_number(self) -> int: \"\"\"Gets the example strip number.\"\"\"", "def add_line(self, line: str) -> None: \"\"\"Adds a line.\"\"\" if self._cloaking: return self._text.append(line)", "None: \"\"\"Starts cloaking.\"\"\" if self._cloaking: raise exceptions.CloakMismatch(f\"Already cloaked at {self.debug_id} ({line_num})\") self._cloaking =", "clean_line = examples.clean_line(line) if any(match in clean_line for match in config.drop_lines): continue for", "for r_before, r_after in config.replacements.items(): clean_line = clean_line.replace(r_before, r_after) examples.validate_line(config.fail_on_contains, clean_line, line_num) #", "# stop capturing, and discard empty blocks examples.store_current(line_num) continue if config.uncloak_flag in line:", "if self._current_example: self._current_example.uncloak(line_num) def end(self, line_num: int) -> None: \"\"\"Ends.\"\"\" if self._current_example: raise", "raise exceptions.ValidationFailure( f\"Unexpected dedent whilst capturing {self._current_example.debug_id} ({line_num})\" ) def validate_line(self, fail_on_contains: List[str],", "start = self._current_example.strip_number return line[start:].rstrip() @property def all(self) -> list: \"\"\"Gets all the", "def is_cloaking(self) -> bool: \"\"\"States whether it's in cloaking mode.\"\"\" return self._cloaking @property", "class Examples: \"\"\"All the examples in a file.\"\"\" def 
__init__(self) -> None: \"\"\"Initialiser.\"\"\"", "not self._cloaking: raise exceptions.CloakMismatch(f\"Already uncloaked at {self.debug_id} ({line_num})\") self._cloaking = False @property def", "\"\"\"Gets all the examples.\"\"\" return self._examples def extract_snippets_from_text(config: Config, lines: list, path: str)", "{self.debug_id} ({line_num})\") self._cloaking = True def uncloak(self, line_num: int) -> None: \"\"\"Stops cloaking.\"\"\"", "examples.set_current( Example(path=path, line_num=line_num, example_name=line.rsplit(\":\")[-1].strip(), line=line), line_num ) continue if config.end_flag in line: #", "-> str: \"\"\"Gets some debug information about the example.\"\"\" return str(self.key) class Examples:", "for match in config.drop_lines): continue for r_before, r_after in config.replacements.items(): clean_line = clean_line.replace(r_before,", "lines of text.\"\"\" examples = Examples() line_index = 0 for line_num, line in", "str: \"\"\"Cleans a line.\"\"\" if not self._current_example: return line start = self._current_example.strip_number return", "if self._cloaking: return self._text.append(line) def cloak(self, line_num: int) -> None: \"\"\"Starts cloaking.\"\"\" if", "Config class Example: \"\"\"An example.\"\"\" def __init__(self, path: str, line_num: int, example_name: str,", "examples = Examples() line_index = 0 for line_num, line in enumerate(lines): line_index =", "line_num=line_num, example_name=line.rsplit(\":\")[-1].strip(), line=line), line_num ) continue if config.end_flag in line: # stop capturing,", "example text.\"\"\" return self._text @property def strip_number(self) -> int: \"\"\"Gets the example strip", ") if not self._current_example.is_empty: self._examples.append(self._current_example) self._current_example = None def cloak(self, line_num: int) ->", "0 for line_num, line in enumerate(lines): line_index = line_num if config.start_flag in line:", "exceptions.CloakMismatch( f\"End of example reached whilst still cloaked 
{self._current_example.debug_id} ({line_num})\" ) if not", "int) -> None: \"\"\"Starts cloaking.\"\"\" if self._cloaking: raise exceptions.CloakMismatch(f\"Already cloaked at {self.debug_id} ({line_num})\")", "line_num) # add this line of code to the example block examples.add_line(clean_line) examples.end(line_index)", "snippet.config import Config class Example: \"\"\"An example.\"\"\" def __init__(self, path: str, line_num: int,", "-> bool: \"\"\"States whether it's in cloaking mode.\"\"\" return self._cloaking @property def is_empty(self)", "whether it's in cloaking mode.\"\"\" return self._cloaking @property def is_empty(self) -> bool: \"\"\"States", "example.\"\"\" return str(self.key) class Examples: \"\"\"All the examples in a file.\"\"\" def __init__(self)", "add_line(self, line: str) -> None: \"\"\"Adds a line.\"\"\" if self._current_example: self._current_example.add_line(line) def validate_dedent(self,", "str, line_num: int) -> None: \"\"\"Validates line.\"\"\" for trigger in fail_on_contains: if trigger", "-> None: \"\"\"Starts cloaking.\"\"\" if self._cloaking: raise exceptions.CloakMismatch(f\"Already cloaked at {self.debug_id} ({line_num})\") self._cloaking", "the current block if config.fail_on_dedent: examples.validate_dedent(line, line_num) clean_line = examples.clean_line(line) if any(match in", "current block if config.fail_on_dedent: examples.validate_dedent(line, line_num) clean_line = examples.clean_line(line) if any(match in clean_line", "raise exceptions.ValidationFailure(f\"Unexpected phrase {repr(trigger)} at {debug_info} ({line_num})\") def clean_line(self, line: str) -> str:", "the example is empty or not.\"\"\" return len(self._text) == 0 @property def text(self)", "self._cloaking @property def is_empty(self) -> bool: \"\"\"States whether the example is empty or", "rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # \"\"\"Text snippet extractor.\"\"\" from typing import List,", "self._text: List[str] = list() self._cloaking = False def add_line(self, line: str) -> None:", "\"\"\"Gets some debug information about the example.\"\"\" return str(self.key) class Examples: \"\"\"All the", "line: str) -> None: \"\"\"Adds a line.\"\"\" if self._cloaking: return self._text.append(line) def cloak(self,", "in fail_on_contains: if trigger in line: debug_info = self._current_example.debug_id if self._current_example else \"\"", "len(line.lstrip()) self._text: List[str] = list() self._cloaking = False def add_line(self, line: str) ->", "whilst capturing, append code lines to the current block if config.fail_on_dedent: examples.validate_dedent(line, line_num)", "self._cloaking: raise exceptions.CloakMismatch(f\"Already uncloaked at {self.debug_id} ({line_num})\") self._cloaking = False @property def is_cloaking(self)", "list, path: str) -> dict: \"\"\"Finds snippets in lines of text.\"\"\" examples =", "add_line(self, line: str) -> None: \"\"\"Adds a line.\"\"\" if self._cloaking: return self._text.append(line) def", "example.\"\"\" if not self._current_example: raise exceptions.StartEndMismatch(f\"Not yet capturing at {line_num}\") if self._current_example.is_cloaking: raise", "= self._current_example.strip_number return line[start:].rstrip() @property def all(self) -> list: \"\"\"Gets all the examples.\"\"\"", "True def uncloak(self, line_num: int) -> None: \"\"\"Stops cloaking.\"\"\" if not self._cloaking: raise", "List[Example] = list() self._current_example: Optional[Example] = None def set_current(self, example: Example, line_num: int)", "self._cloaking: return self._text.append(line) def cloak(self, line_num: int) -> None: \"\"\"Starts cloaking.\"\"\" if self._cloaking:", "the examples in a file.\"\"\" def __init__(self) -> None: \"\"\"Initialiser.\"\"\" self._examples: List[Example] =", "-> None: \"\"\"Stops cloaking.\"\"\" if 
self._current_example: self._current_example.uncloak(line_num) def end(self, line_num: int) -> None:", "({line_num})\" ) def add_line(self, line: str) -> None: \"\"\"Adds a line.\"\"\" if self._current_example:", "trigger in fail_on_contains: if trigger in line: debug_info = self._current_example.debug_id if self._current_example else", "number.\"\"\" return self._strip @property def key(self) -> tuple: \"\"\"Gets the example key.\"\"\" return", "\"\"\"Validates line.\"\"\" for trigger in fail_on_contains: if trigger in line: debug_info = self._current_example.debug_id", "in line: # start capturing code from the next line examples.set_current( Example(path=path, line_num=line_num,", "from snippet.config import Config class Example: \"\"\"An example.\"\"\" def __init__(self, path: str, line_num:", "reserved. # SPDX-License-Identifier: Apache-2.0 # \"\"\"Text snippet extractor.\"\"\" from typing import List, Optional", "lines: list, path: str) -> dict: \"\"\"Finds snippets in lines of text.\"\"\" examples", "if not self._cloaking: raise exceptions.CloakMismatch(f\"Already uncloaked at {self.debug_id} ({line_num})\") self._cloaking = False @property", "({line_num})\") self._cloaking = False @property def is_cloaking(self) -> bool: \"\"\"States whether it's in", "-> None: \"\"\"Validates line.\"\"\" for trigger in fail_on_contains: if trigger in line: debug_info", "reached whilst still capturing {self._current_example.debug_id} ({line_num})\" ) def add_line(self, line: str) -> None:", "of example reached whilst still cloaked {self._current_example.debug_id} ({line_num})\" ) if not self._current_example.is_empty: self._examples.append(self._current_example)", "reached whilst still cloaked {self._current_example.debug_id} ({line_num})\" ) if not self._current_example.is_empty: self._examples.append(self._current_example) self._current_example =", "-> dict: \"\"\"Finds snippets in lines of text.\"\"\" examples = Examples() line_index =", "Config, lines: list, path: str) 
-> dict: \"\"\"Finds snippets in lines of text.\"\"\"", "current example.\"\"\" if not self._current_example: raise exceptions.StartEndMismatch(f\"Not yet capturing at {line_num}\") if self._current_example.is_cloaking:", "else \"\" raise exceptions.ValidationFailure(f\"Unexpected phrase {repr(trigger)} at {debug_info} ({line_num})\") def clean_line(self, line: str)", "def end(self, line_num: int) -> None: \"\"\"Ends.\"\"\" if self._current_example: raise exceptions.StartEndMismatch( f\"EOF reached", "bool: \"\"\"States whether it's in cloaking mode.\"\"\" return self._cloaking @property def is_empty(self) ->", "r_after) examples.validate_line(config.fail_on_contains, clean_line, line_num) # add this line of code to the example", "if self._current_example: self._current_example.cloak(line_num) def uncloak(self, line_num: int) -> None: \"\"\"Stops cloaking.\"\"\" if self._current_example:", "str, line: str) -> None: \"\"\"Initialiser.\"\"\" self._key = (path, line_num, example_name) self._strip =", "{self._current_example.debug_id} ({line_num})\" ) def validate_line(self, fail_on_contains: List[str], line: str, line_num: int) -> None:", "None: \"\"\"Validates line.\"\"\" for trigger in fail_on_contains: if trigger in line: debug_info =", "in enumerate(lines): line_index = line_num if config.start_flag in line: # start capturing code", "not.\"\"\" return len(self._text) == 0 @property def text(self) -> List[str]: \"\"\"Gets example text.\"\"\"", "-> None: \"\"\"Ends.\"\"\" if self._current_example: raise exceptions.StartEndMismatch( f\"EOF reached whilst still capturing {self._current_example.debug_id}", "text(self) -> List[str]: \"\"\"Gets example text.\"\"\" return self._text @property def strip_number(self) -> int:", "not self._current_example: return if any(line[: self._current_example.strip_number].lstrip()): raise exceptions.ValidationFailure( f\"Unexpected dedent whilst capturing {self._current_example.debug_id}", "line_num ) continue if config.end_flag in 
line: # stop capturing, and discard empty", "raise exceptions.StartEndMismatch(f\"Not yet capturing at {line_num}\") if self._current_example.is_cloaking: raise exceptions.CloakMismatch( f\"End of example", "@property def text(self) -> List[str]: \"\"\"Gets example text.\"\"\" return self._text @property def strip_number(self)", "if any(line[: self._current_example.strip_number].lstrip()): raise exceptions.ValidationFailure( f\"Unexpected dedent whilst capturing {self._current_example.debug_id} ({line_num})\" ) def", "if self._current_example.is_cloaking: raise exceptions.CloakMismatch( f\"End of example reached whilst still cloaked {self._current_example.debug_id} ({line_num})\"", "Example: \"\"\"An example.\"\"\" def __init__(self, path: str, line_num: int, example_name: str, line: str)", "line: examples.uncloak(line_num) continue if config.cloak_flag in line: examples.cloak(line_num) continue # whilst capturing, append", "-> None: \"\"\"Adds a line.\"\"\" if self._current_example: self._current_example.add_line(line) def validate_dedent(self, line: str, line_num:", "self._cloaking: raise exceptions.CloakMismatch(f\"Already cloaked at {self.debug_id} ({line_num})\") self._cloaking = True def uncloak(self, line_num:", "\"\"\"Stops cloaking.\"\"\" if self._current_example: self._current_example.uncloak(line_num) def end(self, line_num: int) -> None: \"\"\"Ends.\"\"\" if", "discard empty blocks examples.store_current(line_num) continue if config.uncloak_flag in line: examples.uncloak(line_num) continue if config.cloak_flag", "exceptions.StartEndMismatch(f\"Already capturing at {self._current_example.debug_id} ({line_num})\") self._current_example = example def store_current(self, line_num: int) ->", "to the example block examples.add_line(clean_line) examples.end(line_index) return {example.key: example.text for example in examples.all}", "= False def add_line(self, line: str) -> None: \"\"\"Adds a line.\"\"\" if self._cloaking:", "\"\"\"Initialiser.\"\"\" self._key 
= (path, line_num, example_name) self._strip = len(line) - len(line.lstrip()) self._text: List[str]", "at {self.debug_id} ({line_num})\") self._cloaking = True def uncloak(self, line_num: int) -> None: \"\"\"Stops", "continue if config.end_flag in line: # stop capturing, and discard empty blocks examples.store_current(line_num)", "class Example: \"\"\"An example.\"\"\" def __init__(self, path: str, line_num: int, example_name: str, line:", "any(line[: self._current_example.strip_number].lstrip()): raise exceptions.ValidationFailure( f\"Unexpected dedent whilst capturing {self._current_example.debug_id} ({line_num})\" ) def validate_line(self,", "cloaking.\"\"\" if not self._cloaking: raise exceptions.CloakMismatch(f\"Already uncloaked at {self.debug_id} ({line_num})\") self._cloaking = False", "dict: \"\"\"Finds snippets in lines of text.\"\"\" examples = Examples() line_index = 0", "= (path, line_num, example_name) self._strip = len(line) - len(line.lstrip()) self._text: List[str] = list()", ") def add_line(self, line: str) -> None: \"\"\"Adds a line.\"\"\" if self._current_example: self._current_example.add_line(line)", "def store_current(self, line_num: int) -> None: \"\"\"Stores current example.\"\"\" if not self._current_example: raise", "def debug_id(self) -> str: \"\"\"Gets some debug information about the example.\"\"\" return str(self.key)", "str, line_num: int) -> None: \"\"\"Validates dedent.\"\"\" if not self._current_example: return if any(line[:", "raise exceptions.CloakMismatch(f\"Already uncloaked at {self.debug_id} ({line_num})\") self._cloaking = False @property def is_cloaking(self) ->", "-> None: \"\"\"Stops cloaking.\"\"\" if not self._cloaking: raise exceptions.CloakMismatch(f\"Already uncloaked at {self.debug_id} ({line_num})\")", "it's in cloaking mode.\"\"\" return self._cloaking @property def is_empty(self) -> bool: \"\"\"States whether", "for trigger in fail_on_contains: if trigger in line: debug_info = self._current_example.debug_id if 
self._current_example", "file.\"\"\" def __init__(self) -> None: \"\"\"Initialiser.\"\"\" self._examples: List[Example] = list() self._current_example: Optional[Example] =", "add this line of code to the example block examples.add_line(clean_line) examples.end(line_index) return {example.key:", "still cloaked {self._current_example.debug_id} ({line_num})\" ) if not self._current_example.is_empty: self._examples.append(self._current_example) self._current_example = None def", "int) -> None: \"\"\"Validates line.\"\"\" for trigger in fail_on_contains: if trigger in line:", "line examples.set_current( Example(path=path, line_num=line_num, example_name=line.rsplit(\":\")[-1].strip(), line=line), line_num ) continue if config.end_flag in line:", "Apache-2.0 # \"\"\"Text snippet extractor.\"\"\" from typing import List, Optional from snippet import", "List, Optional from snippet import exceptions from snippet.config import Config class Example: \"\"\"An", "line_num: int) -> None: \"\"\"Starts cloaking.\"\"\" if self._cloaking: raise exceptions.CloakMismatch(f\"Already cloaked at {self.debug_id}", "return self._strip @property def key(self) -> tuple: \"\"\"Gets the example key.\"\"\" return self._key", "self._current_example else \"\" raise exceptions.ValidationFailure(f\"Unexpected phrase {repr(trigger)} at {debug_info} ({line_num})\") def clean_line(self, line:", "capturing, and discard empty blocks examples.store_current(line_num) continue if config.uncloak_flag in line: examples.uncloak(line_num) continue", "def is_empty(self) -> bool: \"\"\"States whether the example is empty or not.\"\"\" return", "is empty or not.\"\"\" return len(self._text) == 0 @property def text(self) -> List[str]:", "to the current block if config.fail_on_dedent: examples.validate_dedent(line, line_num) clean_line = examples.clean_line(line) if any(match", "def text(self) -> List[str]: \"\"\"Gets example text.\"\"\" return self._text @property def strip_number(self) ->", "start capturing code 
from the next line examples.set_current( Example(path=path, line_num=line_num, example_name=line.rsplit(\":\")[-1].strip(), line=line), line_num", "def key(self) -> tuple: \"\"\"Gets the example key.\"\"\" return self._key @property def debug_id(self)", "extract_snippets_from_text(config: Config, lines: list, path: str) -> dict: \"\"\"Finds snippets in lines of", "len(line) - len(line.lstrip()) self._text: List[str] = list() self._cloaking = False def add_line(self, line:", "list() self._current_example: Optional[Example] = None def set_current(self, example: Example, line_num: int) -> None:", "line: # stop capturing, and discard empty blocks examples.store_current(line_num) continue if config.uncloak_flag in", "line_num: int) -> None: \"\"\"Ends.\"\"\" if self._current_example: raise exceptions.StartEndMismatch( f\"EOF reached whilst still", "empty blocks examples.store_current(line_num) continue if config.uncloak_flag in line: examples.uncloak(line_num) continue if config.cloak_flag in", "r_after in config.replacements.items(): clean_line = clean_line.replace(r_before, r_after) examples.validate_line(config.fail_on_contains, clean_line, line_num) # add this", "snippet import exceptions from snippet.config import Config class Example: \"\"\"An example.\"\"\" def __init__(self,", "debug_info = self._current_example.debug_id if self._current_example else \"\" raise exceptions.ValidationFailure(f\"Unexpected phrase {repr(trigger)} at {debug_info}", "List[str] = list() self._cloaking = False def add_line(self, line: str) -> None: \"\"\"Adds", "return if any(line[: self._current_example.strip_number].lstrip()): raise exceptions.ValidationFailure( f\"Unexpected dedent whilst capturing {self._current_example.debug_id} ({line_num})\" )", "mode.\"\"\" return self._cloaking @property def is_empty(self) -> bool: \"\"\"States whether the example is", "whilst capturing {self._current_example.debug_id} ({line_num})\" ) def validate_line(self, fail_on_contains: List[str], line: 
str, line_num: int)", "- len(line.lstrip()) self._text: List[str] = list() self._cloaking = False def add_line(self, line: str)", "@property def is_empty(self) -> bool: \"\"\"States whether the example is empty or not.\"\"\"", "0 @property def text(self) -> List[str]: \"\"\"Gets example text.\"\"\" return self._text @property def", "in line: examples.uncloak(line_num) continue if config.cloak_flag in line: examples.cloak(line_num) continue # whilst capturing,", "= examples.clean_line(line) if any(match in clean_line for match in config.drop_lines): continue for r_before,", "bool: \"\"\"States whether the example is empty or not.\"\"\" return len(self._text) == 0", "= self._current_example.debug_id if self._current_example else \"\" raise exceptions.ValidationFailure(f\"Unexpected phrase {repr(trigger)} at {debug_info} ({line_num})\")", "def clean_line(self, line: str) -> str: \"\"\"Cleans a line.\"\"\" if not self._current_example: return", "if self._current_example: raise exceptions.StartEndMismatch( f\"EOF reached whilst still capturing {self._current_example.debug_id} ({line_num})\" ) def", "\"\"\"Validates dedent.\"\"\" if not self._current_example: return if any(line[: self._current_example.strip_number].lstrip()): raise exceptions.ValidationFailure( f\"Unexpected dedent", "None: \"\"\"Validates dedent.\"\"\" if not self._current_example: return if any(line[: self._current_example.strip_number].lstrip()): raise exceptions.ValidationFailure( f\"Unexpected", "f\"EOF reached whilst still capturing {self._current_example.debug_id} ({line_num})\" ) def add_line(self, line: str) ->", "None: \"\"\"Ends.\"\"\" if self._current_example: raise exceptions.StartEndMismatch( f\"EOF reached whilst still capturing {self._current_example.debug_id} ({line_num})\"", "about the example.\"\"\" return str(self.key) class Examples: \"\"\"All the examples in a file.\"\"\"", "({line_num})\" ) def validate_line(self, fail_on_contains: List[str], line: str, line_num: int) -> None: 
\"\"\"Validates", "in line: # stop capturing, and discard empty blocks examples.store_current(line_num) continue if config.uncloak_flag", "example is empty or not.\"\"\" return len(self._text) == 0 @property def text(self) ->", "\"\"\"Stops cloaking.\"\"\" if not self._cloaking: raise exceptions.CloakMismatch(f\"Already uncloaked at {self.debug_id} ({line_num})\") self._cloaking =", "None: \"\"\"Adds a line.\"\"\" if self._cloaking: return self._text.append(line) def cloak(self, line_num: int) ->", "-> None: \"\"\"Adds a line.\"\"\" if self._cloaking: return self._text.append(line) def cloak(self, line_num: int)", "at {debug_info} ({line_num})\") def clean_line(self, line: str) -> str: \"\"\"Cleans a line.\"\"\" if", "cloaked {self._current_example.debug_id} ({line_num})\" ) if not self._current_example.is_empty: self._examples.append(self._current_example) self._current_example = None def cloak(self,", "line in enumerate(lines): line_index = line_num if config.start_flag in line: # start capturing", "of text.\"\"\" examples = Examples() line_index = 0 for line_num, line in enumerate(lines):", "end(self, line_num: int) -> None: \"\"\"Ends.\"\"\" if self._current_example: raise exceptions.StartEndMismatch( f\"EOF reached whilst", "List[str]: \"\"\"Gets example text.\"\"\" return self._text @property def strip_number(self) -> int: \"\"\"Gets the", "-> int: \"\"\"Gets the example strip number.\"\"\" return self._strip @property def key(self) ->", "str) -> str: \"\"\"Cleans a line.\"\"\" if not self._current_example: return line start =", "a file.\"\"\" def __init__(self) -> None: \"\"\"Initialiser.\"\"\" self._examples: List[Example] = list() self._current_example: Optional[Example]", "from snippet import exceptions from snippet.config import Config class Example: \"\"\"An example.\"\"\" def", "is_empty(self) -> bool: \"\"\"States whether the example is empty or not.\"\"\" return len(self._text)", "validate_line(self, fail_on_contains: List[str], line: str, line_num: 
int) -> None: \"\"\"Validates line.\"\"\" for trigger", "line: str, line_num: int) -> None: \"\"\"Validates dedent.\"\"\" if not self._current_example: return if", "self._cloaking = True def uncloak(self, line_num: int) -> None: \"\"\"Stops cloaking.\"\"\" if not", "raise exceptions.StartEndMismatch( f\"EOF reached whilst still capturing {self._current_example.debug_id} ({line_num})\" ) def add_line(self, line:", "self._current_example: raise exceptions.StartEndMismatch( f\"EOF reached whilst still capturing {self._current_example.debug_id} ({line_num})\" ) def add_line(self,", "whilst still cloaked {self._current_example.debug_id} ({line_num})\" ) if not self._current_example.is_empty: self._examples.append(self._current_example) self._current_example = None", "in line: debug_info = self._current_example.debug_id if self._current_example else \"\" raise exceptions.ValidationFailure(f\"Unexpected phrase {repr(trigger)}", "snippets in lines of text.\"\"\" examples = Examples() line_index = 0 for line_num,", "\"\"\"Adds a line.\"\"\" if self._cloaking: return self._text.append(line) def cloak(self, line_num: int) -> None:", "at {self._current_example.debug_id} ({line_num})\") self._current_example = example def store_current(self, line_num: int) -> None: \"\"\"Stores", "= line_num if config.start_flag in line: # start capturing code from the next", "line: examples.cloak(line_num) continue # whilst capturing, append code lines to the current block", "\"\"\"Start cloaking.\"\"\" if self._current_example: self._current_example.cloak(line_num) def uncloak(self, line_num: int) -> None: \"\"\"Stops cloaking.\"\"\"", "examples.validate_line(config.fail_on_contains, clean_line, line_num) # add this line of code to the example block", "({line_num})\" ) if not self._current_example.is_empty: self._examples.append(self._current_example) self._current_example = None def cloak(self, line_num: int)", "def strip_number(self) -> int: \"\"\"Gets the example strip number.\"\"\" return 
self._strip @property def", "line: str) -> None: \"\"\"Adds a line.\"\"\" if self._current_example: self._current_example.add_line(line) def validate_dedent(self, line:", "List[str], line: str, line_num: int) -> None: \"\"\"Validates line.\"\"\" for trigger in fail_on_contains:", "line: str) -> str: \"\"\"Cleans a line.\"\"\" if not self._current_example: return line start", "if config.uncloak_flag in line: examples.uncloak(line_num) continue if config.cloak_flag in line: examples.cloak(line_num) continue #", "int) -> None: \"\"\"Stops cloaking.\"\"\" if self._current_example: self._current_example.uncloak(line_num) def end(self, line_num: int) ->", "phrase {repr(trigger)} at {debug_info} ({line_num})\") def clean_line(self, line: str) -> str: \"\"\"Cleans a", "continue # whilst capturing, append code lines to the current block if config.fail_on_dedent:", "validate_dedent(self, line: str, line_num: int) -> None: \"\"\"Validates dedent.\"\"\" if not self._current_example: return", "self._current_example: self._current_example.uncloak(line_num) def end(self, line_num: int) -> None: \"\"\"Ends.\"\"\" if self._current_example: raise exceptions.StartEndMismatch(", "config.cloak_flag in line: examples.cloak(line_num) continue # whilst capturing, append code lines to the", "for line_num, line in enumerate(lines): line_index = line_num if config.start_flag in line: #", "examples.validate_dedent(line, line_num) clean_line = examples.clean_line(line) if any(match in clean_line for match in config.drop_lines):", "# SPDX-License-Identifier: Apache-2.0 # \"\"\"Text snippet extractor.\"\"\" from typing import List, Optional from", "== 0 @property def text(self) -> List[str]: \"\"\"Gets example text.\"\"\" return self._text @property", "str: \"\"\"Gets some debug information about the example.\"\"\" return str(self.key) class Examples: \"\"\"All", "({line_num})\") self._cloaking = True def uncloak(self, line_num: int) -> None: \"\"\"Stops cloaking.\"\"\" if", 
"self._current_example.cloak(line_num) def uncloak(self, line_num: int) -> None: \"\"\"Stops cloaking.\"\"\" if self._current_example: self._current_example.uncloak(line_num) def", "clean_line for match in config.drop_lines): continue for r_before, r_after in config.replacements.items(): clean_line =", "int) -> None: \"\"\"Stops cloaking.\"\"\" if not self._cloaking: raise exceptions.CloakMismatch(f\"Already uncloaked at {self.debug_id}", "in config.replacements.items(): clean_line = clean_line.replace(r_before, r_after) examples.validate_line(config.fail_on_contains, clean_line, line_num) # add this line", "line of code to the example block examples.add_line(clean_line) examples.end(line_index) return {example.key: example.text for", "\"\" raise exceptions.ValidationFailure(f\"Unexpected phrase {repr(trigger)} at {debug_info} ({line_num})\") def clean_line(self, line: str) ->", "cloaked at {self.debug_id} ({line_num})\") self._cloaking = True def uncloak(self, line_num: int) -> None:", "all(self) -> list: \"\"\"Gets all the examples.\"\"\" return self._examples def extract_snippets_from_text(config: Config, lines:", "list: \"\"\"Gets all the examples.\"\"\" return self._examples def extract_snippets_from_text(config: Config, lines: list, path:", "in a file.\"\"\" def __init__(self) -> None: \"\"\"Initialiser.\"\"\" self._examples: List[Example] = list() self._current_example:", "def uncloak(self, line_num: int) -> None: \"\"\"Stops cloaking.\"\"\" if not self._cloaking: raise exceptions.CloakMismatch(f\"Already", "line: # start capturing code from the next line examples.set_current( Example(path=path, line_num=line_num, example_name=line.rsplit(\":\")[-1].strip(),", "from the next line examples.set_current( Example(path=path, line_num=line_num, example_name=line.rsplit(\":\")[-1].strip(), line=line), line_num ) continue if", "capturing {self._current_example.debug_id} ({line_num})\" ) def validate_line(self, fail_on_contains: List[str], line: str, line_num: int) 
->", "# # Copyright (C) 2020 Arm Mbed. All rights reserved. # SPDX-License-Identifier: Apache-2.0", "\"\"\"An example.\"\"\" def __init__(self, path: str, line_num: int, example_name: str, line: str) ->", ") continue if config.end_flag in line: # stop capturing, and discard empty blocks", "None: \"\"\"Stores current example.\"\"\" if not self._current_example: raise exceptions.StartEndMismatch(f\"Not yet capturing at {line_num}\")", "\"\"\"Initialiser.\"\"\" self._examples: List[Example] = list() self._current_example: Optional[Example] = None def set_current(self, example: Example,", "{repr(trigger)} at {debug_info} ({line_num})\") def clean_line(self, line: str) -> str: \"\"\"Cleans a line.\"\"\"", "self._current_example.strip_number].lstrip()): raise exceptions.ValidationFailure( f\"Unexpected dedent whilst capturing {self._current_example.debug_id} ({line_num})\" ) def validate_line(self, fail_on_contains:", "False def add_line(self, line: str) -> None: \"\"\"Adds a line.\"\"\" if self._cloaking: return", "Examples: \"\"\"All the examples in a file.\"\"\" def __init__(self) -> None: \"\"\"Initialiser.\"\"\" self._examples:", "a line.\"\"\" if self._current_example: self._current_example.add_line(line) def validate_dedent(self, line: str, line_num: int) -> None:", "extractor.\"\"\" from typing import List, Optional from snippet import exceptions from snippet.config import", "not self._current_example.is_empty: self._examples.append(self._current_example) self._current_example = None def cloak(self, line_num: int) -> None: \"\"\"Start", "self._examples: List[Example] = list() self._current_example: Optional[Example] = None def set_current(self, example: Example, line_num:", "a line.\"\"\" if not self._current_example: return line start = self._current_example.strip_number return line[start:].rstrip() @property", "int, example_name: str, line: str) -> None: \"\"\"Initialiser.\"\"\" self._key = (path, line_num, example_name)", "\"\"\"Ends.\"\"\" if 
self._current_example: raise exceptions.StartEndMismatch( f\"EOF reached whilst still capturing {self._current_example.debug_id} ({line_num})\" )", "None def set_current(self, example: Example, line_num: int) -> None: \"\"\"Sets current example.\"\"\" if", "line.\"\"\" for trigger in fail_on_contains: if trigger in line: debug_info = self._current_example.debug_id if", "= False @property def is_cloaking(self) -> bool: \"\"\"States whether it's in cloaking mode.\"\"\"", "import List, Optional from snippet import exceptions from snippet.config import Config class Example:", "clean_line = clean_line.replace(r_before, r_after) examples.validate_line(config.fail_on_contains, clean_line, line_num) # add this line of code", "line_num: int) -> None: \"\"\"Stops cloaking.\"\"\" if self._current_example: self._current_example.uncloak(line_num) def end(self, line_num: int)", "exceptions.CloakMismatch(f\"Already cloaked at {self.debug_id} ({line_num})\") self._cloaking = True def uncloak(self, line_num: int) ->", "= 0 for line_num, line in enumerate(lines): line_index = line_num if config.start_flag in", "line_num: int, example_name: str, line: str) -> None: \"\"\"Initialiser.\"\"\" self._key = (path, line_num,", "self._cloaking = False def add_line(self, line: str) -> None: \"\"\"Adds a line.\"\"\" if", "def validate_line(self, fail_on_contains: List[str], line: str, line_num: int) -> None: \"\"\"Validates line.\"\"\" for", "@property def debug_id(self) -> str: \"\"\"Gets some debug information about the example.\"\"\" return", "match in config.drop_lines): continue for r_before, r_after in config.replacements.items(): clean_line = clean_line.replace(r_before, r_after)", "is_cloaking(self) -> bool: \"\"\"States whether it's in cloaking mode.\"\"\" return self._cloaking @property def", "return line start = self._current_example.strip_number return line[start:].rstrip() @property def all(self) -> list: \"\"\"Gets", "raise exceptions.CloakMismatch( f\"End of example reached 
whilst still cloaked {self._current_example.debug_id} ({line_num})\" ) if", "in line: examples.cloak(line_num) continue # whilst capturing, append code lines to the current", "uncloak(self, line_num: int) -> None: \"\"\"Stops cloaking.\"\"\" if not self._cloaking: raise exceptions.CloakMismatch(f\"Already uncloaked", "stop capturing, and discard empty blocks examples.store_current(line_num) continue if config.uncloak_flag in line: examples.uncloak(line_num)", "Examples() line_index = 0 for line_num, line in enumerate(lines): line_index = line_num if", "if not self._current_example: return if any(line[: self._current_example.strip_number].lstrip()): raise exceptions.ValidationFailure( f\"Unexpected dedent whilst capturing", "examples.cloak(line_num) continue # whilst capturing, append code lines to the current block if", "def all(self) -> list: \"\"\"Gets all the examples.\"\"\" return self._examples def extract_snippets_from_text(config: Config,", "continue if config.cloak_flag in line: examples.cloak(line_num) continue # whilst capturing, append code lines", "key.\"\"\" return self._key @property def debug_id(self) -> str: \"\"\"Gets some debug information about", "self._current_example: Optional[Example] = None def set_current(self, example: Example, line_num: int) -> None: \"\"\"Sets", "cloaking.\"\"\" if self._current_example: self._current_example.uncloak(line_num) def end(self, line_num: int) -> None: \"\"\"Ends.\"\"\" if self._current_example:", "line.\"\"\" if self._cloaking: return self._text.append(line) def cloak(self, line_num: int) -> None: \"\"\"Starts cloaking.\"\"\"", "SPDX-License-Identifier: Apache-2.0 # \"\"\"Text snippet extractor.\"\"\" from typing import List, Optional from snippet", "= example def store_current(self, line_num: int) -> None: \"\"\"Stores current example.\"\"\" if not", "exceptions.ValidationFailure( f\"Unexpected dedent whilst capturing {self._current_example.debug_id} ({line_num})\" ) def validate_line(self, fail_on_contains: 
List[str], line:", "exceptions.StartEndMismatch(f\"Not yet capturing at {line_num}\") if self._current_example.is_cloaking: raise exceptions.CloakMismatch( f\"End of example reached", "int) -> None: \"\"\"Sets current example.\"\"\" if self._current_example: raise exceptions.StartEndMismatch(f\"Already capturing at {self._current_example.debug_id}", "and discard empty blocks examples.store_current(line_num) continue if config.uncloak_flag in line: examples.uncloak(line_num) continue if", "-> None: \"\"\"Initialiser.\"\"\" self._key = (path, line_num, example_name) self._strip = len(line) - len(line.lstrip())", "-> bool: \"\"\"States whether the example is empty or not.\"\"\" return len(self._text) ==", "the example.\"\"\" return str(self.key) class Examples: \"\"\"All the examples in a file.\"\"\" def", "None: \"\"\"Adds a line.\"\"\" if self._current_example: self._current_example.add_line(line) def validate_dedent(self, line: str, line_num: int)", "self._current_example: return line start = self._current_example.strip_number return line[start:].rstrip() @property def all(self) -> list:", "self._current_example = None def cloak(self, line_num: int) -> None: \"\"\"Start cloaking.\"\"\" if self._current_example:", "\"\"\"All the examples in a file.\"\"\" def __init__(self) -> None: \"\"\"Initialiser.\"\"\" self._examples: List[Example]", "# start capturing code from the next line examples.set_current( Example(path=path, line_num=line_num, example_name=line.rsplit(\":\")[-1].strip(), line=line),", "str, line_num: int, example_name: str, line: str) -> None: \"\"\"Initialiser.\"\"\" self._key = (path,", "raise exceptions.StartEndMismatch(f\"Already capturing at {self._current_example.debug_id} ({line_num})\") self._current_example = example def store_current(self, line_num: int)", "int) -> None: \"\"\"Validates dedent.\"\"\" if not self._current_example: return if any(line[: self._current_example.strip_number].lstrip()): raise", "empty or not.\"\"\" return 
len(self._text) == 0 @property def text(self) -> List[str]: \"\"\"Gets", "None: \"\"\"Initialiser.\"\"\" self._key = (path, line_num, example_name) self._strip = len(line) - len(line.lstrip()) self._text:", "@property def key(self) -> tuple: \"\"\"Gets the example key.\"\"\" return self._key @property def", "self._current_example.is_cloaking: raise exceptions.CloakMismatch( f\"End of example reached whilst still cloaked {self._current_example.debug_id} ({line_num})\" )", "None: \"\"\"Sets current example.\"\"\" if self._current_example: raise exceptions.StartEndMismatch(f\"Already capturing at {self._current_example.debug_id} ({line_num})\") self._current_example", "int) -> None: \"\"\"Ends.\"\"\" if self._current_example: raise exceptions.StartEndMismatch( f\"EOF reached whilst still capturing", "\"\"\"Finds snippets in lines of text.\"\"\" examples = Examples() line_index = 0 for", "not self._current_example: return line start = self._current_example.strip_number return line[start:].rstrip() @property def all(self) ->", "clean_line, line_num) # add this line of code to the example block examples.add_line(clean_line)", "\"\"\"Gets the example strip number.\"\"\" return self._strip @property def key(self) -> tuple: \"\"\"Gets", "raise exceptions.CloakMismatch(f\"Already cloaked at {self.debug_id} ({line_num})\") self._cloaking = True def uncloak(self, line_num: int)", "line_num: int) -> None: \"\"\"Start cloaking.\"\"\" if self._current_example: self._current_example.cloak(line_num) def uncloak(self, line_num: int)", "self._current_example: self._current_example.add_line(line) def validate_dedent(self, line: str, line_num: int) -> None: \"\"\"Validates dedent.\"\"\" if", "at {self.debug_id} ({line_num})\") self._cloaking = False @property def is_cloaking(self) -> bool: \"\"\"States whether", "def validate_dedent(self, line: str, line_num: int) -> None: \"\"\"Validates dedent.\"\"\" if not self._current_example:", "in lines of text.\"\"\" examples = Examples() 
line_index = 0 for line_num, line", "blocks examples.store_current(line_num) continue if config.uncloak_flag in line: examples.uncloak(line_num) continue if config.cloak_flag in line:", "str(self.key) class Examples: \"\"\"All the examples in a file.\"\"\" def __init__(self) -> None:", "config.uncloak_flag in line: examples.uncloak(line_num) continue if config.cloak_flag in line: examples.cloak(line_num) continue # whilst", "line_num: int) -> None: \"\"\"Validates line.\"\"\" for trigger in fail_on_contains: if trigger in", "config.replacements.items(): clean_line = clean_line.replace(r_before, r_after) examples.validate_line(config.fail_on_contains, clean_line, line_num) # add this line of", "self._strip = len(line) - len(line.lstrip()) self._text: List[str] = list() self._cloaking = False def", "line_num: int) -> None: \"\"\"Stores current example.\"\"\" if not self._current_example: raise exceptions.StartEndMismatch(f\"Not yet", "not self._current_example: raise exceptions.StartEndMismatch(f\"Not yet capturing at {line_num}\") if self._current_example.is_cloaking: raise exceptions.CloakMismatch( f\"End", "example.\"\"\" def __init__(self, path: str, line_num: int, example_name: str, line: str) -> None:", "__init__(self, path: str, line_num: int, example_name: str, line: str) -> None: \"\"\"Initialiser.\"\"\" self._key", "clean_line(self, line: str) -> str: \"\"\"Cleans a line.\"\"\" if not self._current_example: return line", "self._text @property def strip_number(self) -> int: \"\"\"Gets the example strip number.\"\"\" return self._strip", "def cloak(self, line_num: int) -> None: \"\"\"Start cloaking.\"\"\" if self._current_example: self._current_example.cloak(line_num) def uncloak(self,", "def add_line(self, line: str) -> None: \"\"\"Adds a line.\"\"\" if self._current_example: self._current_example.add_line(line) def", "line_index = 0 for line_num, line in enumerate(lines): line_index = line_num if config.start_flag", "continue for r_before, r_after in 
config.replacements.items(): clean_line = clean_line.replace(r_before, r_after) examples.validate_line(config.fail_on_contains, clean_line, line_num)", "@property def strip_number(self) -> int: \"\"\"Gets the example strip number.\"\"\" return self._strip @property", "exceptions.ValidationFailure(f\"Unexpected phrase {repr(trigger)} at {debug_info} ({line_num})\") def clean_line(self, line: str) -> str: \"\"\"Cleans", "or not.\"\"\" return len(self._text) == 0 @property def text(self) -> List[str]: \"\"\"Gets example", "lines to the current block if config.fail_on_dedent: examples.validate_dedent(line, line_num) clean_line = examples.clean_line(line) if", "line_num: int) -> None: \"\"\"Validates dedent.\"\"\" if not self._current_example: return if any(line[: self._current_example.strip_number].lstrip()):", "return self._key @property def debug_id(self) -> str: \"\"\"Gets some debug information about the", "cloak(self, line_num: int) -> None: \"\"\"Starts cloaking.\"\"\" if self._cloaking: raise exceptions.CloakMismatch(f\"Already cloaked at", "{self._current_example.debug_id} ({line_num})\" ) def add_line(self, line: str) -> None: \"\"\"Adds a line.\"\"\" if", "self._current_example: raise exceptions.StartEndMismatch(f\"Already capturing at {self._current_example.debug_id} ({line_num})\") self._current_example = example def store_current(self, line_num:", "examples in a file.\"\"\" def __init__(self) -> None: \"\"\"Initialiser.\"\"\" self._examples: List[Example] = list()", "still capturing {self._current_example.debug_id} ({line_num})\" ) def add_line(self, line: str) -> None: \"\"\"Adds a", "def uncloak(self, line_num: int) -> None: \"\"\"Stops cloaking.\"\"\" if self._current_example: self._current_example.uncloak(line_num) def end(self,", "str) -> dict: \"\"\"Finds snippets in lines of text.\"\"\" examples = Examples() line_index", "Mbed. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # \"\"\"Text snippet extractor.\"\"\" from typing", "return str(self.key) class Examples: \"\"\"All the examples in a file.\"\"\" def __init__(self) ->", "examples.store_current(line_num) continue if config.uncloak_flag in line: examples.uncloak(line_num) continue if config.cloak_flag in line: examples.cloak(line_num)", "= None def cloak(self, line_num: int) -> None: \"\"\"Start cloaking.\"\"\" if self._current_example: self._current_example.cloak(line_num)", "code lines to the current block if config.fail_on_dedent: examples.validate_dedent(line, line_num) clean_line = examples.clean_line(line)", "return self._cloaking @property def is_empty(self) -> bool: \"\"\"States whether the example is empty", "{self.debug_id} ({line_num})\") self._cloaking = False @property def is_cloaking(self) -> bool: \"\"\"States whether it's", "of code to the example block examples.add_line(clean_line) examples.end(line_index) return {example.key: example.text for example", "-> str: \"\"\"Cleans a line.\"\"\" if not self._current_example: return line start = self._current_example.strip_number", "next line examples.set_current( Example(path=path, line_num=line_num, example_name=line.rsplit(\":\")[-1].strip(), line=line), line_num ) continue if config.end_flag in", "({line_num})\") self._current_example = example def store_current(self, line_num: int) -> None: \"\"\"Stores current example.\"\"\"", "if trigger in line: debug_info = self._current_example.debug_id if self._current_example else \"\" raise exceptions.ValidationFailure(f\"Unexpected", "a line.\"\"\" if self._cloaking: return self._text.append(line) def cloak(self, line_num: int) -> None: \"\"\"Starts", "line_num) clean_line = examples.clean_line(line) if any(match in clean_line for match in config.drop_lines): continue", "the examples.\"\"\" return self._examples def extract_snippets_from_text(config: Config, lines: list, path: str) -> dict:", "some debug information about the 
example.\"\"\" return str(self.key) class Examples: \"\"\"All the examples", "if any(match in clean_line for match in config.drop_lines): continue for r_before, r_after in", "def extract_snippets_from_text(config: Config, lines: list, path: str) -> dict: \"\"\"Finds snippets in lines", "line: debug_info = self._current_example.debug_id if self._current_example else \"\" raise exceptions.ValidationFailure(f\"Unexpected phrase {repr(trigger)} at", "line.\"\"\" if self._current_example: self._current_example.add_line(line) def validate_dedent(self, line: str, line_num: int) -> None: \"\"\"Validates", "information about the example.\"\"\" return str(self.key) class Examples: \"\"\"All the examples in a", "@property def is_cloaking(self) -> bool: \"\"\"States whether it's in cloaking mode.\"\"\" return self._cloaking", "all the examples.\"\"\" return self._examples def extract_snippets_from_text(config: Config, lines: list, path: str) ->", "__init__(self) -> None: \"\"\"Initialiser.\"\"\" self._examples: List[Example] = list() self._current_example: Optional[Example] = None def", "\"\"\"Gets example text.\"\"\" return self._text @property def strip_number(self) -> int: \"\"\"Gets the example", "self._current_example.debug_id if self._current_example else \"\" raise exceptions.ValidationFailure(f\"Unexpected phrase {repr(trigger)} at {debug_info} ({line_num})\") def", "key(self) -> tuple: \"\"\"Gets the example key.\"\"\" return self._key @property def debug_id(self) ->", "return line[start:].rstrip() @property def all(self) -> list: \"\"\"Gets all the examples.\"\"\" return self._examples", "example strip number.\"\"\" return self._strip @property def key(self) -> tuple: \"\"\"Gets the example", "line_num, example_name) self._strip = len(line) - len(line.lstrip()) self._text: List[str] = list() self._cloaking =", "False @property def is_cloaking(self) -> bool: \"\"\"States whether it's in cloaking mode.\"\"\" return", "self._text.append(line) def cloak(self, line_num: 
int) -> None: \"\"\"Starts cloaking.\"\"\" if self._cloaking: raise exceptions.CloakMismatch(f\"Already", "(C) 2020 Arm Mbed. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # \"\"\"Text snippet", "-> list: \"\"\"Gets all the examples.\"\"\" return self._examples def extract_snippets_from_text(config: Config, lines: list,", "current example.\"\"\" if self._current_example: raise exceptions.StartEndMismatch(f\"Already capturing at {self._current_example.debug_id} ({line_num})\") self._current_example = example", "from typing import List, Optional from snippet import exceptions from snippet.config import Config", "debug information about the example.\"\"\" return str(self.key) class Examples: \"\"\"All the examples in", "-> None: \"\"\"Stores current example.\"\"\" if not self._current_example: raise exceptions.StartEndMismatch(f\"Not yet capturing at", "None def cloak(self, line_num: int) -> None: \"\"\"Start cloaking.\"\"\" if self._current_example: self._current_example.cloak(line_num) def", "in clean_line for match in config.drop_lines): continue for r_before, r_after in config.replacements.items(): clean_line", "line_num: int) -> None: \"\"\"Stops cloaking.\"\"\" if not self._cloaking: raise exceptions.CloakMismatch(f\"Already uncloaked at", "line_num: int) -> None: \"\"\"Sets current example.\"\"\" if self._current_example: raise exceptions.StartEndMismatch(f\"Already capturing at", "= Examples() line_index = 0 for line_num, line in enumerate(lines): line_index = line_num", "if self._cloaking: raise exceptions.CloakMismatch(f\"Already cloaked at {self.debug_id} ({line_num})\") self._cloaking = True def uncloak(self,", "return self._text @property def strip_number(self) -> int: \"\"\"Gets the example strip number.\"\"\" return", "fail_on_contains: if trigger in line: debug_info = self._current_example.debug_id if self._current_example else \"\" raise", "= len(line) - len(line.lstrip()) self._text: List[str] = list() self._cloaking = False def 
add_line(self,", "continue if config.uncloak_flag in line: examples.uncloak(line_num) continue if config.cloak_flag in line: examples.cloak(line_num) continue", "self._key @property def debug_id(self) -> str: \"\"\"Gets some debug information about the example.\"\"\"", "f\"End of example reached whilst still cloaked {self._current_example.debug_id} ({line_num})\" ) if not self._current_example.is_empty:", "self._current_example.is_empty: self._examples.append(self._current_example) self._current_example = None def cloak(self, line_num: int) -> None: \"\"\"Start cloaking.\"\"\"", "\"\"\"Adds a line.\"\"\" if self._current_example: self._current_example.add_line(line) def validate_dedent(self, line: str, line_num: int) ->", "line=line), line_num ) continue if config.end_flag in line: # stop capturing, and discard", "exceptions.StartEndMismatch( f\"EOF reached whilst still capturing {self._current_example.debug_id} ({line_num})\" ) def add_line(self, line: str)", "if config.end_flag in line: # stop capturing, and discard empty blocks examples.store_current(line_num) continue", "capturing {self._current_example.debug_id} ({line_num})\" ) def add_line(self, line: str) -> None: \"\"\"Adds a line.\"\"\"", "= clean_line.replace(r_before, r_after) examples.validate_line(config.fail_on_contains, clean_line, line_num) # add this line of code to", "typing import List, Optional from snippet import exceptions from snippet.config import Config class", "(path, line_num, example_name) self._strip = len(line) - len(line.lstrip()) self._text: List[str] = list() self._cloaking", "\"\"\"Starts cloaking.\"\"\" if self._cloaking: raise exceptions.CloakMismatch(f\"Already cloaked at {self.debug_id} ({line_num})\") self._cloaking = True", "self._cloaking = False @property def is_cloaking(self) -> bool: \"\"\"States whether it's in cloaking", "examples.clean_line(line) if any(match in clean_line for match in config.drop_lines): continue for r_before, r_after", "2020 Arm Mbed. 
All rights reserved. # SPDX-License-Identifier: Apache-2.0 # \"\"\"Text snippet extractor.\"\"\"", "import exceptions from snippet.config import Config class Example: \"\"\"An example.\"\"\" def __init__(self, path:", "\"\"\"Gets the example key.\"\"\" return self._key @property def debug_id(self) -> str: \"\"\"Gets some", "return len(self._text) == 0 @property def text(self) -> List[str]: \"\"\"Gets example text.\"\"\" return", "{self._current_example.debug_id} ({line_num})\") self._current_example = example def store_current(self, line_num: int) -> None: \"\"\"Stores current", "the example strip number.\"\"\" return self._strip @property def key(self) -> tuple: \"\"\"Gets the", "if config.cloak_flag in line: examples.cloak(line_num) continue # whilst capturing, append code lines to", "line_num if config.start_flag in line: # start capturing code from the next line", "self._current_example.strip_number return line[start:].rstrip() @property def all(self) -> list: \"\"\"Gets all the examples.\"\"\" return", "config.drop_lines): continue for r_before, r_after in config.replacements.items(): clean_line = clean_line.replace(r_before, r_after) examples.validate_line(config.fail_on_contains, clean_line,", "\"\"\"Sets current example.\"\"\" if self._current_example: raise exceptions.StartEndMismatch(f\"Already capturing at {self._current_example.debug_id} ({line_num})\") self._current_example =", "({line_num})\") def clean_line(self, line: str) -> str: \"\"\"Cleans a line.\"\"\" if not self._current_example:", "\"\"\"Text snippet extractor.\"\"\" from typing import List, Optional from snippet import exceptions from", "def set_current(self, example: Example, line_num: int) -> None: \"\"\"Sets current example.\"\"\" if self._current_example:", "self._current_example = example def store_current(self, line_num: int) -> None: \"\"\"Stores current example.\"\"\" if", "def cloak(self, line_num: int) -> None: \"\"\"Starts cloaking.\"\"\" if self._cloaking: raise 
exceptions.CloakMismatch(f\"Already cloaked", "if self._current_example else \"\" raise exceptions.ValidationFailure(f\"Unexpected phrase {repr(trigger)} at {debug_info} ({line_num})\") def clean_line(self,", "example: Example, line_num: int) -> None: \"\"\"Sets current example.\"\"\" if self._current_example: raise exceptions.StartEndMismatch(f\"Already", "{self._current_example.debug_id} ({line_num})\" ) if not self._current_example.is_empty: self._examples.append(self._current_example) self._current_example = None def cloak(self, line_num:", "return self._examples def extract_snippets_from_text(config: Config, lines: list, path: str) -> dict: \"\"\"Finds snippets", "-> None: \"\"\"Initialiser.\"\"\" self._examples: List[Example] = list() self._current_example: Optional[Example] = None def set_current(self,", "int: \"\"\"Gets the example strip number.\"\"\" return self._strip @property def key(self) -> tuple:", "strip_number(self) -> int: \"\"\"Gets the example strip number.\"\"\" return self._strip @property def key(self)", "code from the next line examples.set_current( Example(path=path, line_num=line_num, example_name=line.rsplit(\":\")[-1].strip(), line=line), line_num ) continue", "# \"\"\"Text snippet extractor.\"\"\" from typing import List, Optional from snippet import exceptions", "store_current(self, line_num: int) -> None: \"\"\"Stores current example.\"\"\" if not self._current_example: raise exceptions.StartEndMismatch(f\"Not", "-> None: \"\"\"Validates dedent.\"\"\" if not self._current_example: return if any(line[: self._current_example.strip_number].lstrip()): raise exceptions.ValidationFailure(", "snippet extractor.\"\"\" from typing import List, Optional from snippet import exceptions from snippet.config", "clean_line.replace(r_before, r_after) examples.validate_line(config.fail_on_contains, clean_line, line_num) # add this line of code to the", "example_name) self._strip = len(line) - len(line.lstrip()) self._text: List[str] = list() 
self._cloaking = False", "set_current(self, example: Example, line_num: int) -> None: \"\"\"Sets current example.\"\"\" if self._current_example: raise", "self._examples.append(self._current_example) self._current_example = None def cloak(self, line_num: int) -> None: \"\"\"Start cloaking.\"\"\" if", "self._key = (path, line_num, example_name) self._strip = len(line) - len(line.lstrip()) self._text: List[str] =", "Example(path=path, line_num=line_num, example_name=line.rsplit(\":\")[-1].strip(), line=line), line_num ) continue if config.end_flag in line: # stop", "int) -> None: \"\"\"Start cloaking.\"\"\" if self._current_example: self._current_example.cloak(line_num) def uncloak(self, line_num: int) ->", "the next line examples.set_current( Example(path=path, line_num=line_num, example_name=line.rsplit(\":\")[-1].strip(), line=line), line_num ) continue if config.end_flag", "Copyright (C) 2020 Arm Mbed. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # \"\"\"Text", "def __init__(self, path: str, line_num: int, example_name: str, line: str) -> None: \"\"\"Initialiser.\"\"\"", "debug_id(self) -> str: \"\"\"Gets some debug information about the example.\"\"\" return str(self.key) class", "cloaking.\"\"\" if self._current_example: self._current_example.cloak(line_num) def uncloak(self, line_num: int) -> None: \"\"\"Stops cloaking.\"\"\" if", "= list() self._current_example: Optional[Example] = None def set_current(self, example: Example, line_num: int) ->", "fail_on_contains: List[str], line: str, line_num: int) -> None: \"\"\"Validates line.\"\"\" for trigger in", "Optional from snippet import exceptions from snippet.config import Config class Example: \"\"\"An example.\"\"\"", "\"\"\"Stores current example.\"\"\" if not self._current_example: raise exceptions.StartEndMismatch(f\"Not yet capturing at {line_num}\") if", "Example, line_num: int) -> None: \"\"\"Sets current example.\"\"\" if self._current_example: raise exceptions.StartEndMismatch(f\"Already 
capturing", "whether the example is empty or not.\"\"\" return len(self._text) == 0 @property def", "in cloaking mode.\"\"\" return self._cloaking @property def is_empty(self) -> bool: \"\"\"States whether the", "def __init__(self) -> None: \"\"\"Initialiser.\"\"\" self._examples: List[Example] = list() self._current_example: Optional[Example] = None", "at {line_num}\") if self._current_example.is_cloaking: raise exceptions.CloakMismatch( f\"End of example reached whilst still cloaked", "capturing, append code lines to the current block if config.fail_on_dedent: examples.validate_dedent(line, line_num) clean_line", "self._strip @property def key(self) -> tuple: \"\"\"Gets the example key.\"\"\" return self._key @property", "any(match in clean_line for match in config.drop_lines): continue for r_before, r_after in config.replacements.items():", "r_before, r_after in config.replacements.items(): clean_line = clean_line.replace(r_before, r_after) examples.validate_line(config.fail_on_contains, clean_line, line_num) # add", "uncloak(self, line_num: int) -> None: \"\"\"Stops cloaking.\"\"\" if self._current_example: self._current_example.uncloak(line_num) def end(self, line_num:", "Optional[Example] = None def set_current(self, example: Example, line_num: int) -> None: \"\"\"Sets current", "All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # \"\"\"Text snippet extractor.\"\"\" from typing import", "text.\"\"\" examples = Examples() line_index = 0 for line_num, line in enumerate(lines): line_index", "import Config class Example: \"\"\"An example.\"\"\" def __init__(self, path: str, line_num: int, example_name:", "yet capturing at {line_num}\") if self._current_example.is_cloaking: raise exceptions.CloakMismatch( f\"End of example reached whilst", "examples.\"\"\" return self._examples def extract_snippets_from_text(config: Config, lines: list, path: str) -> dict: \"\"\"Finds", "line start = self._current_example.strip_number return line[start:].rstrip() @property def all(self) -> list: \"\"\"Gets all", "if self._current_example: raise exceptions.StartEndMismatch(f\"Already capturing at {self._current_example.debug_id} ({line_num})\") self._current_example = example def store_current(self,", "\"\"\"States whether the example is empty or not.\"\"\" return len(self._text) == 0 @property", "strip number.\"\"\" return self._strip @property def key(self) -> tuple: \"\"\"Gets the example key.\"\"\"", "example_name=line.rsplit(\":\")[-1].strip(), line=line), line_num ) continue if config.end_flag in line: # stop capturing, and", "\"\"\"States whether it's in cloaking mode.\"\"\" return self._cloaking @property def is_empty(self) -> bool:", "int) -> None: \"\"\"Stores current example.\"\"\" if not self._current_example: raise exceptions.StartEndMismatch(f\"Not yet capturing", "-> None: \"\"\"Start cloaking.\"\"\" if self._current_example: self._current_example.cloak(line_num) def uncloak(self, line_num: int) -> None:", "Arm Mbed. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # \"\"\"Text snippet extractor.\"\"\" from", "example reached whilst still cloaked {self._current_example.debug_id} ({line_num})\" ) if not self._current_example.is_empty: self._examples.append(self._current_example) self._current_example", "line: str) -> None: \"\"\"Initialiser.\"\"\" self._key = (path, line_num, example_name) self._strip = len(line)", "the example key.\"\"\" return self._key @property def debug_id(self) -> str: \"\"\"Gets some debug", "code to the example block examples.add_line(clean_line) examples.end(line_index) return {example.key: example.text for example in", "self._current_example.add_line(line) def validate_dedent(self, line: str, line_num: int) -> None: \"\"\"Validates dedent.\"\"\" if not", "self._current_example.uncloak(line_num) def end(self, line_num: int) -> None: \"\"\"Ends.\"\"\" if self._current_example: raise exceptions.StartEndMismatch( f\"EOF", "example_name: str, line: str) -> None: \"\"\"Initialiser.\"\"\" self._key = (path, line_num, example_name) self._strip", "if not self._current_example: raise exceptions.StartEndMismatch(f\"Not yet capturing at {line_num}\") if self._current_example.is_cloaking: raise exceptions.CloakMismatch(", "append code lines to the current block if config.fail_on_dedent: examples.validate_dedent(line, line_num) clean_line =", "str) -> None: \"\"\"Adds a line.\"\"\" if self._cloaking: return self._text.append(line) def cloak(self, line_num:", "-> None: \"\"\"Sets current example.\"\"\" if self._current_example: raise exceptions.StartEndMismatch(f\"Already capturing at {self._current_example.debug_id} ({line_num})\")", "list() self._cloaking = False def add_line(self, line: str) -> None: \"\"\"Adds a line.\"\"\"", "-> List[str]: \"\"\"Gets example text.\"\"\" return self._text @property def strip_number(self) -> int: \"\"\"Gets", "example def store_current(self, line_num: int) -> None: \"\"\"Stores current example.\"\"\" if not self._current_example:", "example 
key.\"\"\" return self._key @property def debug_id(self) -> str: \"\"\"Gets some debug information", "None: \"\"\"Stops cloaking.\"\"\" if not self._cloaking: raise exceptions.CloakMismatch(f\"Already uncloaked at {self.debug_id} ({line_num})\") self._cloaking", "whilst still capturing {self._current_example.debug_id} ({line_num})\" ) def add_line(self, line: str) -> None: \"\"\"Adds", "return self._text.append(line) def cloak(self, line_num: int) -> None: \"\"\"Starts cloaking.\"\"\" if self._cloaking: raise", "= list() self._cloaking = False def add_line(self, line: str) -> None: \"\"\"Adds a", "-> tuple: \"\"\"Gets the example key.\"\"\" return self._key @property def debug_id(self) -> str:", "if config.fail_on_dedent: examples.validate_dedent(line, line_num) clean_line = examples.clean_line(line) if any(match in clean_line for match", "@property def all(self) -> list: \"\"\"Gets all the examples.\"\"\" return self._examples def extract_snippets_from_text(config:", "= None def set_current(self, example: Example, line_num: int) -> None: \"\"\"Sets current example.\"\"\"", "example.\"\"\" if self._current_example: raise exceptions.StartEndMismatch(f\"Already capturing at {self._current_example.debug_id} ({line_num})\") self._current_example = example def", "# Copyright (C) 2020 Arm Mbed. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 #", "dedent.\"\"\" if not self._current_example: return if any(line[: self._current_example.strip_number].lstrip()): raise exceptions.ValidationFailure( f\"Unexpected dedent whilst", "block if config.fail_on_dedent: examples.validate_dedent(line, line_num) clean_line = examples.clean_line(line) if any(match in clean_line for", "{debug_info} ({line_num})\") def clean_line(self, line: str) -> str: \"\"\"Cleans a line.\"\"\" if not", "\"\"\"Cleans a line.\"\"\" if not self._current_example: return line start = self._current_example.strip_number return line[start:].rstrip()", "cloak(self, line_num: int) -> None: \"\"\"Start cloaking.\"\"\" if self._current_example: self._current_example.cloak(line_num) def uncloak(self, line_num:", "exceptions from snippet.config import Config class Example: \"\"\"An example.\"\"\" def __init__(self, path: str,", "capturing at {line_num}\") if self._current_example.is_cloaking: raise exceptions.CloakMismatch( f\"End of example reached whilst still", "f\"Unexpected dedent whilst capturing {self._current_example.debug_id} ({line_num})\" ) def validate_line(self, fail_on_contains: List[str], line: str,", "line: str, line_num: int) -> None: \"\"\"Validates line.\"\"\" for trigger in fail_on_contains: if", "capturing at {self._current_example.debug_id} ({line_num})\") self._current_example = example def store_current(self, line_num: int) -> None:", "self._current_example: raise exceptions.StartEndMismatch(f\"Not yet capturing at {line_num}\") if self._current_example.is_cloaking: raise exceptions.CloakMismatch( f\"End of", "if not self._current_example: return line start = self._current_example.strip_number return line[start:].rstrip() @property def all(self)", "None: \"\"\"Initialiser.\"\"\" self._examples: List[Example] = list() self._current_example: Optional[Example] = None def set_current(self, example:", "config.start_flag in line: # start capturing code from the next line examples.set_current( 
Example(path=path,", "{line_num}\") if self._current_example.is_cloaking: raise exceptions.CloakMismatch( f\"End of example reached whilst still cloaked {self._current_example.debug_id}", "cloaking mode.\"\"\" return self._cloaking @property def is_empty(self) -> bool: \"\"\"States whether the example" ]
[ "= ['ram:123'] temporary = {'./documentroot/temp.html':'samplefortemp/temp.html'} permanent = {'./documentroot/perm.html':'samplefortemp/perm.html'} authorized = ['./documentroot/auth_file.html'] auth_pass =", "'/', '/assets', '/proxy', '/samplefortemp' } prox = ['./documentroot/proxy/proxyfile.html'] proxy_user = ['ram:123'] temporary =", "['<PASSWORD>'] file_per = { 'auth_file.html', 'dashboard.html', 'favicon.icon', 'form.html', 'index.html', 'menu.html', 'new-user.html', 'orders.html', 'purchases.html',", "for write #0 => not allowed #1 => allowed dir_per = { '/',", "#1 => allowed dir_per = { '/', '/assets', '/proxy', '/samplefortemp' } prox =", "allowed dir_per = { '/', '/assets', '/proxy', '/samplefortemp' } prox = ['./documentroot/proxy/proxyfile.html'] proxy_user", "['./documentroot/proxy/proxyfile.html'] proxy_user = ['ram:123'] temporary = {'./documentroot/temp.html':'samplefortemp/temp.html'} permanent = {'./documentroot/perm.html':'samplefortemp/perm.html'} authorized = ['./documentroot/auth_file.html']", "permanent = {'./documentroot/perm.html':'samplefortemp/perm.html'} authorized = ['./documentroot/auth_file.html'] auth_pass = ['<PASSWORD>'] file_per = { 'auth_file.html',", "prox = ['./documentroot/proxy/proxyfile.html'] proxy_user = ['ram:123'] temporary = {'./documentroot/temp.html':'samplefortemp/temp.html'} permanent = {'./documentroot/perm.html':'samplefortemp/perm.html'} authorized", "authorized = ['./documentroot/auth_file.html'] auth_pass = ['<PASSWORD>'] file_per = { 'auth_file.html', 'dashboard.html', 'favicon.icon', 'form.html',", "allowed #1 => allowed dir_per = { '/', '/assets', '/proxy', '/samplefortemp' } prox", "'dashboard.html', 'favicon.icon', 'form.html', 'index.html', 'menu.html', 'new-user.html', 'orders.html', 'purchases.html', 'staff.html', 'suppliers.html', 'transactions.html', '/samplefortemp/perm.html', '/samplefortemp/temp.html',", "dictionary with permissions about dir. 
#first one is for read #second for write", "is dictionary with permissions about dir. #first one is for read #second for", "= {'./documentroot/perm.html':'samplefortemp/perm.html'} authorized = ['./documentroot/auth_file.html'] auth_pass = ['<PASSWORD>'] file_per = { 'auth_file.html', 'dashboard.html',", "['ram:123'] temporary = {'./documentroot/temp.html':'samplefortemp/temp.html'} permanent = {'./documentroot/perm.html':'samplefortemp/perm.html'} authorized = ['./documentroot/auth_file.html'] auth_pass = ['<PASSWORD>']", "#this is dictionary with permissions about dir. #first one is for read #second", "'favicon.icon', 'form.html', 'index.html', 'menu.html', 'new-user.html', 'orders.html', 'purchases.html', 'staff.html', 'suppliers.html', 'transactions.html', '/samplefortemp/perm.html', '/samplefortemp/temp.html', '/proxy/proxyfile.html'", "#0 => not allowed #1 => allowed dir_per = { '/', '/assets', '/proxy',", "dir_per = { '/', '/assets', '/proxy', '/samplefortemp' } prox = ['./documentroot/proxy/proxyfile.html'] proxy_user =", "{ 'auth_file.html', 'dashboard.html', 'favicon.icon', 'form.html', 'index.html', 'menu.html', 'new-user.html', 'orders.html', 'purchases.html', 'staff.html', 'suppliers.html', 'transactions.html',", "'auth_file.html', 'dashboard.html', 'favicon.icon', 'form.html', 'index.html', 'menu.html', 'new-user.html', 'orders.html', 'purchases.html', 'staff.html', 'suppliers.html', 'transactions.html', '/samplefortemp/perm.html',", "read #second for write #0 => not allowed #1 => allowed dir_per =", "=> allowed dir_per = { '/', '/assets', '/proxy', '/samplefortemp' } prox = ['./documentroot/proxy/proxyfile.html']", "'/proxy', '/samplefortemp' } prox = ['./documentroot/proxy/proxyfile.html'] proxy_user = ['ram:123'] temporary = {'./documentroot/temp.html':'samplefortemp/temp.html'} permanent", "{'./documentroot/temp.html':'samplefortemp/temp.html'} permanent = {'./documentroot/perm.html':'samplefortemp/perm.html'} authorized = 
['./documentroot/auth_file.html'] auth_pass = ['<PASSWORD>'] file_per = {", "{ '/', '/assets', '/proxy', '/samplefortemp' } prox = ['./documentroot/proxy/proxyfile.html'] proxy_user = ['ram:123'] temporary", "proxy_user = ['ram:123'] temporary = {'./documentroot/temp.html':'samplefortemp/temp.html'} permanent = {'./documentroot/perm.html':'samplefortemp/perm.html'} authorized = ['./documentroot/auth_file.html'] auth_pass", "= ['<PASSWORD>'] file_per = { 'auth_file.html', 'dashboard.html', 'favicon.icon', 'form.html', 'index.html', 'menu.html', 'new-user.html', 'orders.html',", "write #0 => not allowed #1 => allowed dir_per = { '/', '/assets',", "'/assets', '/proxy', '/samplefortemp' } prox = ['./documentroot/proxy/proxyfile.html'] proxy_user = ['ram:123'] temporary = {'./documentroot/temp.html':'samplefortemp/temp.html'}", "permissions about dir. #first one is for read #second for write #0 =>", "not allowed #1 => allowed dir_per = { '/', '/assets', '/proxy', '/samplefortemp' }", "{'./documentroot/perm.html':'samplefortemp/perm.html'} authorized = ['./documentroot/auth_file.html'] auth_pass = ['<PASSWORD>'] file_per = { 'auth_file.html', 'dashboard.html', 'favicon.icon',", "dir. 
#first one is for read #second for write #0 => not allowed", "= { '/', '/assets', '/proxy', '/samplefortemp' } prox = ['./documentroot/proxy/proxyfile.html'] proxy_user = ['ram:123']", "#first one is for read #second for write #0 => not allowed #1", "=> not allowed #1 => allowed dir_per = { '/', '/assets', '/proxy', '/samplefortemp'", "= { 'auth_file.html', 'dashboard.html', 'favicon.icon', 'form.html', 'index.html', 'menu.html', 'new-user.html', 'orders.html', 'purchases.html', 'staff.html', 'suppliers.html',", "auth_pass = ['<PASSWORD>'] file_per = { 'auth_file.html', 'dashboard.html', 'favicon.icon', 'form.html', 'index.html', 'menu.html', 'new-user.html',", "#second for write #0 => not allowed #1 => allowed dir_per = {", "one is for read #second for write #0 => not allowed #1 =>", "with permissions about dir. #first one is for read #second for write #0", "= {'./documentroot/temp.html':'samplefortemp/temp.html'} permanent = {'./documentroot/perm.html':'samplefortemp/perm.html'} authorized = ['./documentroot/auth_file.html'] auth_pass = ['<PASSWORD>'] file_per =", "about dir. 
#first one is for read #second for write #0 => not", "'/samplefortemp' } prox = ['./documentroot/proxy/proxyfile.html'] proxy_user = ['ram:123'] temporary = {'./documentroot/temp.html':'samplefortemp/temp.html'} permanent =", "for read #second for write #0 => not allowed #1 => allowed dir_per", "temporary = {'./documentroot/temp.html':'samplefortemp/temp.html'} permanent = {'./documentroot/perm.html':'samplefortemp/perm.html'} authorized = ['./documentroot/auth_file.html'] auth_pass = ['<PASSWORD>'] file_per", "file_per = { 'auth_file.html', 'dashboard.html', 'favicon.icon', 'form.html', 'index.html', 'menu.html', 'new-user.html', 'orders.html', 'purchases.html', 'staff.html',", "'form.html', 'index.html', 'menu.html', 'new-user.html', 'orders.html', 'purchases.html', 'staff.html', 'suppliers.html', 'transactions.html', '/samplefortemp/perm.html', '/samplefortemp/temp.html', '/proxy/proxyfile.html' }", "= ['./documentroot/auth_file.html'] auth_pass = ['<PASSWORD>'] file_per = { 'auth_file.html', 'dashboard.html', 'favicon.icon', 'form.html', 'index.html',", "} prox = ['./documentroot/proxy/proxyfile.html'] proxy_user = ['ram:123'] temporary = {'./documentroot/temp.html':'samplefortemp/temp.html'} permanent = {'./documentroot/perm.html':'samplefortemp/perm.html'}", "is for read #second for write #0 => not allowed #1 => allowed", "['./documentroot/auth_file.html'] auth_pass = ['<PASSWORD>'] file_per = { 'auth_file.html', 'dashboard.html', 'favicon.icon', 'form.html', 'index.html', 'menu.html',", "= ['./documentroot/proxy/proxyfile.html'] proxy_user = ['ram:123'] temporary = {'./documentroot/temp.html':'samplefortemp/temp.html'} permanent = {'./documentroot/perm.html':'samplefortemp/perm.html'} authorized =" ]
[ "2: freq = int(argv[2]) file_found = 0 while not file_found: dirFiles = os.listdir(fileDir)", "in dirFiles: file_found = 1 else: time.sleep(freq) sys.exit(0) if __name__ == \"__main__\": if", "= 0 while not file_found: dirFiles = os.listdir(fileDir) if file in dirFiles: file_found", "file_found: dirFiles = os.listdir(fileDir) if file in dirFiles: file_found = 1 else: time.sleep(freq)", "sys, os, time def usage(): print \"WF_WaitForFile.py [file's dir] [file] [frequency - default=60s]\"", "python import sys, os, time def usage(): print \"WF_WaitForFile.py [file's dir] [file] [frequency", "usage(): print \"WF_WaitForFile.py [file's dir] [file] [frequency - default=60s]\" sys.exit(1) def main(argv): fileDir", "[frequency - default=60s]\" sys.exit(1) def main(argv): fileDir = argv[0] file = argv[1] freq", "> 2: freq = int(argv[2]) file_found = 0 while not file_found: dirFiles =", "= os.listdir(fileDir) if file in dirFiles: file_found = 1 else: time.sleep(freq) sys.exit(0) if", "dirFiles: file_found = 1 else: time.sleep(freq) sys.exit(0) if __name__ == \"__main__\": if len(sys.argv)", "os.listdir(fileDir) if file in dirFiles: file_found = 1 else: time.sleep(freq) sys.exit(0) if __name__", "time def usage(): print \"WF_WaitForFile.py [file's dir] [file] [frequency - default=60s]\" sys.exit(1) def", "1 else: time.sleep(freq) sys.exit(0) if __name__ == \"__main__\": if len(sys.argv) == 1: usage()", "import sys, os, time def usage(): print \"WF_WaitForFile.py [file's dir] [file] [frequency -", "0 while not file_found: dirFiles = os.listdir(fileDir) if file in dirFiles: file_found =", "file = argv[1] freq = 60 if len(argv) > 2: freq = int(argv[2])", "= argv[0] file = argv[1] freq = 60 if len(argv) > 2: freq", "dirFiles = os.listdir(fileDir) if file in dirFiles: file_found = 1 else: time.sleep(freq) sys.exit(0)", "while not file_found: dirFiles = os.listdir(fileDir) if file in dirFiles: file_found = 1", "fileDir = argv[0] file = argv[1] freq = 60 if len(argv) > 2:", "else: 
time.sleep(freq) sys.exit(0) if __name__ == \"__main__\": if len(sys.argv) == 1: usage() else:", "[file's dir] [file] [frequency - default=60s]\" sys.exit(1) def main(argv): fileDir = argv[0] file", "= int(argv[2]) file_found = 0 while not file_found: dirFiles = os.listdir(fileDir) if file", "file_found = 0 while not file_found: dirFiles = os.listdir(fileDir) if file in dirFiles:", "time.sleep(freq) sys.exit(0) if __name__ == \"__main__\": if len(sys.argv) == 1: usage() else: main(sys.argv[1:])", "def main(argv): fileDir = argv[0] file = argv[1] freq = 60 if len(argv)", "argv[1] freq = 60 if len(argv) > 2: freq = int(argv[2]) file_found =", "freq = 60 if len(argv) > 2: freq = int(argv[2]) file_found = 0", "argv[0] file = argv[1] freq = 60 if len(argv) > 2: freq =", "int(argv[2]) file_found = 0 while not file_found: dirFiles = os.listdir(fileDir) if file in", "print \"WF_WaitForFile.py [file's dir] [file] [frequency - default=60s]\" sys.exit(1) def main(argv): fileDir =", "file in dirFiles: file_found = 1 else: time.sleep(freq) sys.exit(0) if __name__ == \"__main__\":", "def usage(): print \"WF_WaitForFile.py [file's dir] [file] [frequency - default=60s]\" sys.exit(1) def main(argv):", "- default=60s]\" sys.exit(1) def main(argv): fileDir = argv[0] file = argv[1] freq =", "not file_found: dirFiles = os.listdir(fileDir) if file in dirFiles: file_found = 1 else:", "dir] [file] [frequency - default=60s]\" sys.exit(1) def main(argv): fileDir = argv[0] file =", "= argv[1] freq = 60 if len(argv) > 2: freq = int(argv[2]) file_found", "if file in dirFiles: file_found = 1 else: time.sleep(freq) sys.exit(0) if __name__ ==", "= 1 else: time.sleep(freq) sys.exit(0) if __name__ == \"__main__\": if len(sys.argv) == 1:", "os, time def usage(): print \"WF_WaitForFile.py [file's dir] [file] [frequency - default=60s]\" sys.exit(1)", "\"WF_WaitForFile.py [file's dir] [file] [frequency - default=60s]\" sys.exit(1) def main(argv): fileDir = argv[0]", "len(argv) > 2: freq = int(argv[2]) 
file_found = 0 while not file_found: dirFiles", "60 if len(argv) > 2: freq = int(argv[2]) file_found = 0 while not", "main(argv): fileDir = argv[0] file = argv[1] freq = 60 if len(argv) >", "file_found = 1 else: time.sleep(freq) sys.exit(0) if __name__ == \"__main__\": if len(sys.argv) ==", "#!/usr/bin/env python import sys, os, time def usage(): print \"WF_WaitForFile.py [file's dir] [file]", "[file] [frequency - default=60s]\" sys.exit(1) def main(argv): fileDir = argv[0] file = argv[1]", "default=60s]\" sys.exit(1) def main(argv): fileDir = argv[0] file = argv[1] freq = 60", "= 60 if len(argv) > 2: freq = int(argv[2]) file_found = 0 while", "if len(argv) > 2: freq = int(argv[2]) file_found = 0 while not file_found:", "freq = int(argv[2]) file_found = 0 while not file_found: dirFiles = os.listdir(fileDir) if", "sys.exit(1) def main(argv): fileDir = argv[0] file = argv[1] freq = 60 if" ]
[ "exhaust.State): return state.maybe() space = exhaust.space(gen) assert len(set(space)) == 2 assert len(set(space)) ==", "return state.maybe() space = exhaust.space(gen) assert len(set(space)) == 2 assert len(set(space)) == 2", "exhaust def test_double_iteration(): def gen(state: exhaust.State): return state.maybe() space = exhaust.space(gen) assert len(set(space))", "import exhaust def test_double_iteration(): def gen(state: exhaust.State): return state.maybe() space = exhaust.space(gen) assert", "def gen(state: exhaust.State): return state.maybe() space = exhaust.space(gen) assert len(set(space)) == 2 assert", "def test_double_iteration(): def gen(state: exhaust.State): return state.maybe() space = exhaust.space(gen) assert len(set(space)) ==", "test_double_iteration(): def gen(state: exhaust.State): return state.maybe() space = exhaust.space(gen) assert len(set(space)) == 2", "gen(state: exhaust.State): return state.maybe() space = exhaust.space(gen) assert len(set(space)) == 2 assert len(set(space))" ]
[ "get_perl_cgi(attrib_dict) return g.ispman.addUser(cgi) def ldap_search(ldap_filter=\"objectClass=*\", attrs=None, scope=\"sub\", sort='ispmanUserId', ascending=True): base = APP_CONF['ispman_ldap_base_dn'] if", "cache.get_cache('ispman') allowed_user_attributes = ( 'dn', 'dialupAccess', 'radiusProfileDn', 'uid', 'uidNumber', 'gidNumber', 'homeDirectory', 'loginShell', 'ispmanStatus',", "return g.ispman.addUser(cgi) def ldap_search(ldap_filter=\"objectClass=*\", attrs=None, scope=\"sub\", sort='ispmanUserId', ascending=True): base = APP_CONF['ispman_ldap_base_dn'] if attrs", "not results: return None for dn in results: vals = results[dn] vals['dn'] =", "return defaults @perlexcept def add_user(attrib_dict): cgi = get_perl_cgi(attrib_dict) return g.ispman.addUser(cgi) def ldap_search(ldap_filter=\"objectClass=*\", attrs=None,", "vals['dn'] = dn entries.append(vals) if len(entries) <= 1: return entries decorated = [(dict_[sort],", "'fileHost', 'cn', 'mailRoutingAddress', 'FTPStatus', 'FTPQuotaMBytes', 'mailAlias', 'sn', 'mailLocalAddress', 'userPassword', 'mailForwardingAddress', 'givenName') updatable_attributes =", "what it can to strings, # we have to make these lists if", "asbool(g.ispman.update_user(cgi)) def get_user_attribute_values(id, domain, attribute): return to_unicode( g.ispman.getUserAttributeValues(id, domain, attribute) ) @perlexcept def", "on `domain`\"\"\" if attr_list.count('ispmanUserId') < 1: attr_list.append('ispmanUserId') userlist = to_unicode(g.ispman.getUsers(domain, attr_list)) decorated =", "in results: vals = results[dn] vals['dn'] = dn entries.append(vals) if len(entries) <= 1:", "+= \"\"\"}) or die \"$@\";\"\"\" cgi = g.perl.eval(cgi_params) g.perl.eval('$q->header(-charset => \"UTF-8\");') return cgi", "in domain_users: user_id = user['ispmanUserId'] lengths[user_id] = {} # Aparently Genshi converts what", "for dict_ in entries] decorated.sort() if not ascending: decorated.reverse() result = [dict_ for", ") userlist = [] lengths = {} 
for user in domain_users: user_id =", "if val[n] == address: return user[\"ispmanUserId\"] elif val == address: return user[\"ispmanUserId\"] return", "decorated.sort() if not ascending: decorated.reverse() result = [dict_ for (key, dict_) in decorated]", "hour @beaker_cache(expire=3600, query_args=True) def get_default_acount_vars(): defaults = {} defaults['defaultUserFtpQuota'] = to_unicode( g.ispman.getConf('defaultUserFtpQuota') )", "= [dict_ for (key, dict_) in decorated] return lengths, result def get_user_info(uid, domain):", "address: return user[\"ispmanUserId\"] elif val == address: return user[\"ispmanUserId\"] return None def get_users_list(domain,", "results = to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter, attrs, scope) ) else: results = to_unicode( g.ispman.getEntriesAsHashRef(base,", "'mailRoutingAddress', 'FTPStatus', 'FTPQuotaMBytes', 'mailAlias', 'sn', 'mailLocalAddress', 'userPassword', 'mailForwardingAddress', 'givenName') updatable_attributes = ( 'ispmanStatus',", "len(user_info['mailAlias']) if 'mailForwardingAddress' in user_info: lengths[uid]['forwards'] = len(user_info['mailForwardingAddress']) user_info['mailQuota'] = int(user_info['mailQuota'])/1024 return lengths,", "information. 
# ============================================================================= from string import join from formencode.variabledecode import variable_decode from pylons", "user[\"ispmanUserId\"] elif val == address: return user[\"ispmanUserId\"] return None def get_users_list(domain, letter, sortby=None,", "============================================================================= # $Id: ispman_helpers.py 84 2006-11-27 04:12:13Z s0undt3ch $ # ============================================================================= # $URL:", "'DestinationPort', 'mailQuota', 'mailHost', 'fileHost', 'cn', 'mailRoutingAddress', 'FTPStatus', 'FTPQuotaMBytes', 'mailAlias', 'sn', 'mailLocalAddress', 'userPassword', 'mailForwardingAddress',", "# ============================================================================= # $Id: ispman_helpers.py 84 2006-11-27 04:12:13Z s0undt3ch $ # ============================================================================= #", "for (key, dict_) in decorated] return result def address_exists_on_domain(domain, address): users = get_domain_users(", "user_id.upper().startswith(letter): userlist.append(user) # let's save some time and return right away if we", "licensing information. 
# ============================================================================= from string import join from formencode.variabledecode import variable_decode from", "return to_unicode(g.ispman.getUserCount(domain)) # cache it for 1 hour @beaker_cache(expire=3600, query_args=True) def get_default_acount_vars(): defaults", "\"ispmanUserId\", \"mailLocalAddress\", \"mailForwardingAddress\", \"userPassword\", \"mailQuota\", \"mailAlias\", \"FTPQuotaMBytes\", \"FTPStatus\" ] ) userlist = []", "'val': val} ) cgi_params += \"\"\"}) or die \"$@\";\"\"\" cgi = g.perl.eval(cgi_params) g.perl.eval('$q->header(-charset", "+ request.POST['ispmanDomain'] return bool(int(g.ispman.userExists(uid))) # cache it for 5 minutes @beaker_cache(expire=300, query_args=True) def", "# -*- coding: utf-8 -*- # vim: sw=4 ts=4 fenc=utf-8 # ============================================================================= #", "$ # $Rev: 84 $ # $LastChangedBy: s0undt3ch $ # ============================================================================= # Copyright", "= [(dict_[sortby], dict_) for dict_ in userlist] decorated.sort() if not sort_ascending: decorated.reverse() result", "make these lists if 'mailAlias' in user: lengths[user_id]['aliases'] = len(user['mailAlias']) if 'mailForwardingAddress' in", "cache.get_cache(domain) def get_domain_users(domain, attr_list): #attributes_to_retrieve): \"\"\"Function to get the `attr_list` from all users", "Aparently Genshi converts what it can to strings, # we have to make", "cgi_params += \"\"\"}) or die \"$@\";\"\"\" cgi = g.perl.eval(cgi_params) g.perl.eval('$q->header(-charset => \"UTF-8\");') return", "in userlist.values()] decorated.sort() result = [dict_ for (key, dict_) in decorated] return result", "vim: sw=4 ts=4 fenc=utf-8 # ============================================================================= # $Id: ispman_helpers.py 84 2006-11-27 04:12:13Z s0undt3ch", "\"cn\", \"ispmanCreateTimestamp\", \"ispmanUserId\", \"mailLocalAddress\", 
\"mailForwardingAddress\", \"userPassword\", \"mailQuota\", \"mailAlias\", \"FTPQuotaMBytes\", \"FTPStatus\" ] ) userlist", "{} # Aparently Genshi converts what it can to strings, # we have", "['%(val)s'], \"\"\" params_dict = variable_decode(params_dict) cgi_params = \"$q = new CGI({\" for key,", "g.ispman.getEntriesAsHashRef(base, ldap_filter) ) entries = [] if not results: return None for dn", "84 $ # $LastChangedBy: s0undt3ch $ # ============================================================================= # Copyright (C) 2006 Ufsoft.org", "# ============================================================================= # $URL: http://ispmanccp.ufsoft.org/svn/branches/PythonPerl/ispmanccp/lib/ispman_helpers.py $ # $LastChangedDate: 2006-11-27 04:12:13 +0000 (Mon, 27", "val == address: return user[\"ispmanUserId\"] return None def get_users_list(domain, letter, sortby=None, sort_ascending=True): domain_users", "attribute) ) @perlexcept def delete_user(post_dict): cgi = get_perl_cgi(post_dict) return asbool(g.ispman.deleteUser(cgi)) def user_exists(user_id): uid", "def get_users_list(domain, letter, sortby=None, sort_ascending=True): domain_users = get_domain_users( domain, [ \"dn\", \"givenName\", \"sn\",", "can to strings, # we have to make these lists if 'mailAlias' in", "sort='ispmanUserId', ascending=True): base = APP_CONF['ispman_ldap_base_dn'] if attrs is not None: results = to_unicode(", "2006-11-27 04:12:13Z s0undt3ch $ # ============================================================================= # $URL: http://ispmanccp.ufsoft.org/svn/branches/PythonPerl/ispmanccp/lib/ispman_helpers.py $ # $LastChangedDate: 2006-11-27", "-*- coding: utf-8 -*- # vim: sw=4 ts=4 fenc=utf-8 # ============================================================================= # $Id:", "dict_ in userlist] decorated.sort() if not sort_ascending: decorated.reverse() result = [dict_ for (key,", "strings, # we have to make these lists if 'mailAlias' in user: 
lengths[user_id]['aliases']", "=> ['%(val)s'], \"\"\" params_dict = variable_decode(params_dict) cgi_params = \"$q = new CGI({\" for", "\"FTPStatus\" ] ) userlist = [] lengths = {} for user in domain_users:", "[ \"ispmanUserId\", \"mailAlias\", \"mailLocalAddress\", #\"mailForwardingAddress\" ] ) for user in users: for key,", "= [dict_ for (key, dict_) in decorated] return result def address_exists_on_domain(domain, address): users", "if isinstance(val, list): cgi_params += attrib_tpl % ( {'key': key, 'val': join(val)} )", "= APP_CONF['ispman_ldap_base_dn'] if attrs is not None: results = to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter, attrs,", "'uid', 'uidNumber', 'gidNumber', 'homeDirectory', 'loginShell', 'ispmanStatus', 'ispmanCreateTimestamp', 'ispmanUserId', 'ispmanDomain', 'DestinationAddress', 'DestinationPort', 'mailQuota', 'mailHost',", "{} lengths[uid] = {} if 'mailAlias' in user_info: lengths[uid]['aliases'] = len(user_info['mailAlias']) if 'mailForwardingAddress'", "`attr_list` from all users on `domain`\"\"\" if attr_list.count('ispmanUserId') < 1: attr_list.append('ispmanUserId') userlist =", "'mailLocalAddress', 'userPassword', 'mailForwardingAddress', 'givenName') updatable_attributes = ( 'ispmanStatus', 'mailQuota', 'mailAlias', 'sn', 'userPassword', 'givenName',", "( 'ispmanStatus', 'mailQuota', 'mailAlias', 'sn', 'userPassword', 'givenName', 'updateUser', 'uid', 'mailForwardingAddress', 'ispmanDomain', 'FTPQuotaMBytes', 'FTPStatus',", "= get_domain_users( domain, [ \"ispmanUserId\", \"mailAlias\", \"mailLocalAddress\", #\"mailForwardingAddress\" ] ) for user in", "variable_decode from pylons import request, g, cache from pylons.decorators.cache import beaker_cache from ispmanccp.lib.helpers", "'ispmanUserId', 'ispmanDomain', 'DestinationAddress', 'DestinationPort', 'mailQuota', 'mailHost', 'fileHost', 'cn', 'mailRoutingAddress', 'FTPStatus', 'FTPQuotaMBytes', 'mailAlias', 'sn',", "{} for user in domain_users: user_id = 
user['ispmanUserId'] lengths[user_id] = {} # Aparently", "g.ispman.getConf('defaultUserFtpQuota') ) defaults['defaultUserMailQuota'] = to_unicode( g.ispman.getConf('defaultUserMailQuota') ) return defaults @perlexcept def add_user(attrib_dict): cgi", "if letter == 'All' or user_id.upper().startswith(letter): userlist.append(user) # let's save some time and", "it for 1 hour @beaker_cache(expire=3600, query_args=True) def get_default_acount_vars(): defaults = {} defaults['defaultUserFtpQuota'] =", "'FTPQuotaMBytes', 'mailAlias', 'sn', 'mailLocalAddress', 'userPassword', 'mailForwardingAddress', 'givenName') updatable_attributes = ( 'ispmanStatus', 'mailQuota', 'mailAlias',", "domain_users = get_domain_users( domain, [ \"dn\", \"givenName\", \"sn\", \"cn\", \"ispmanCreateTimestamp\", \"ispmanUserId\", \"mailLocalAddress\", \"mailForwardingAddress\",", "isinstance(val, list): cgi_params += attrib_tpl % ( {'key': key, 'val': join(val)} ) else:", "from ispmanccp.lib.helpers import to_unicode, asbool from ispmanccp.lib.decorators import perlexcept APP_CONF = g.pylons_config.app_conf ispman_cache", "== 'All' or user_id.upper().startswith(letter): userlist.append(user) # let's save some time and return right", "@beaker_cache(expire=3600, query_args=True) def get_default_acount_vars(): defaults = {} defaults['defaultUserFtpQuota'] = to_unicode( g.ispman.getConf('defaultUserFtpQuota') ) defaults['defaultUserMailQuota']", "None def get_users_list(domain, letter, sortby=None, sort_ascending=True): domain_users = get_domain_users( domain, [ \"dn\", \"givenName\",", "and return right away if we don't need any sorting if len(userlist) <=", "s0undt3ch $ # ============================================================================= # $URL: http://ispmanccp.ufsoft.org/svn/branches/PythonPerl/ispmanccp/lib/ispman_helpers.py $ # $LastChangedDate: 2006-11-27 04:12:13 +0000", "userlist] decorated.sort() if not sort_ascending: decorated.reverse() result = [dict_ for (key, dict_) in", 
"'dialupAccess', 'radiusProfileDn', 'uid', 'uidNumber', 'gidNumber', 'homeDirectory', 'loginShell', 'ispmanStatus', 'ispmanCreateTimestamp', 'ispmanUserId', 'ispmanDomain', 'DestinationAddress', 'DestinationPort',", "'uid', 'mailForwardingAddress', 'ispmanDomain', 'FTPQuotaMBytes', 'FTPStatus', 'mailHost', 'fileHost', 'dialupAccess', 'radiusProfileDN' ) def get_cache(domain): return", "None for dn in results: vals = results[dn] vals['dn'] = dn entries.append(vals) if", "1: return lengths, userlist decorated = [(dict_[sortby], dict_) for dict_ in userlist] decorated.sort()", "'dn', 'dialupAccess', 'radiusProfileDn', 'uid', 'uidNumber', 'gidNumber', 'homeDirectory', 'loginShell', 'ispmanStatus', 'ispmanCreateTimestamp', 'ispmanUserId', 'ispmanDomain', 'DestinationAddress',", "return lengths, result def get_user_info(uid, domain): user_info = to_unicode(g.ispman.getUserInfo(uid + '@' + domain,", "$ # $LastChangedBy: s0undt3ch $ # ============================================================================= # Copyright (C) 2006 Ufsoft.org -", "Nov 2006) $ # $Rev: 84 $ # $LastChangedBy: s0undt3ch $ # =============================================================================", "converts what it can to strings, # we have to make these lists", "# ============================================================================= from string import join from formencode.variabledecode import variable_decode from pylons import", "] ) userlist = [] lengths = {} for user in domain_users: user_id", "g.ispman.getConf('defaultUserMailQuota') ) return defaults @perlexcept def add_user(attrib_dict): cgi = get_perl_cgi(attrib_dict) return g.ispman.addUser(cgi) def", "defaults['defaultUserMailQuota'] = to_unicode( g.ispman.getConf('defaultUserMailQuota') ) return defaults @perlexcept def add_user(attrib_dict): cgi = get_perl_cgi(attrib_dict)", "{} if 'mailAlias' in user_info: lengths[uid]['aliases'] = len(user_info['mailAlias']) if 'mailForwardingAddress' in user_info: 
lengths[uid]['forwards']", "asbool from ispmanccp.lib.decorators import perlexcept APP_CONF = g.pylons_config.app_conf ispman_cache = cache.get_cache('ispman') allowed_user_attributes =", "= variable_decode(params_dict) cgi_params = \"$q = new CGI({\" for key, val in params_dict.iteritems():", "'ispmanStatus', 'mailQuota', 'mailAlias', 'sn', 'userPassword', 'givenName', 'updateUser', 'uid', 'mailForwardingAddress', 'ispmanDomain', 'FTPQuotaMBytes', 'FTPStatus', 'mailHost',", "lists if 'mailAlias' in user: lengths[user_id]['aliases'] = len(user['mailAlias']) if 'mailForwardingAddress' in user: lengths[user_id]['forwards']", "uid = user_id + '@' + request.POST['ispmanDomain'] return bool(int(g.ispman.userExists(uid))) # cache it for", "address: return user[\"ispmanUserId\"] return None def get_users_list(domain, letter, sortby=None, sort_ascending=True): domain_users = get_domain_users(", "lengths[user_id]['forwards'] = len(user['mailForwardingAddress']) if letter == 'All' or user_id.upper().startswith(letter): userlist.append(user) # let's save", "http://ispmanccp.ufsoft.org/svn/branches/PythonPerl/ispmanccp/lib/ispman_helpers.py $ # $LastChangedDate: 2006-11-27 04:12:13 +0000 (Mon, 27 Nov 2006) $ #", "decorated = [(dict_[sortby], dict_) for dict_ in userlist] decorated.sort() if not sort_ascending: decorated.reverse()", "def get_domain_users(domain, attr_list): #attributes_to_retrieve): \"\"\"Function to get the `attr_list` from all users on", "to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter, attrs, scope) ) else: results = to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter) )", "'mailHost', 'fileHost', 'cn', 'mailRoutingAddress', 'FTPStatus', 'FTPQuotaMBytes', 'mailAlias', 'sn', 'mailLocalAddress', 'userPassword', 'mailForwardingAddress', 'givenName') updatable_attributes", "[] if not results: return None for dn in results: vals = results[dn]", "def get_cache(domain): return cache.get_cache(domain) def get_domain_users(domain, attr_list): 
#attributes_to_retrieve): \"\"\"Function to get the `attr_list`", "for user in domain_users: user_id = user['ispmanUserId'] lengths[user_id] = {} # Aparently Genshi", "+ domain, domain)) lengths = {} lengths[uid] = {} if 'mailAlias' in user_info:", "or die \"$@\";\"\"\" cgi = g.perl.eval(cgi_params) g.perl.eval('$q->header(-charset => \"UTF-8\");') return cgi @perlexcept def", "return None for dn in results: vals = results[dn] vals['dn'] = dn entries.append(vals)", "def update_user_info(attrib_dict): cgi = get_perl_cgi(attrib_dict) return asbool(g.ispman.update_user(cgi)) def get_user_attribute_values(id, domain, attribute): return to_unicode(", "decorated.reverse() result = [dict_ for (key, dict_) in decorated] return lengths, result def", "cache it for 1 hour @beaker_cache(expire=3600, query_args=True) def get_default_acount_vars(): defaults = {} defaults['defaultUserFtpQuota']", "in decorated] return lengths, result def get_user_info(uid, domain): user_info = to_unicode(g.ispman.getUserInfo(uid + '@'", "# $LastChangedBy: s0undt3ch $ # ============================================================================= # Copyright (C) 2006 Ufsoft.org - <NAME>", "key in updatable_attributes: if isinstance(val, list): cgi_params += attrib_tpl % ( {'key': key,", "list): cgi_params += attrib_tpl % ( {'key': key, 'val': join(val)} ) else: cgi_params", "'mailForwardingAddress' in user_info: lengths[uid]['forwards'] = len(user_info['mailForwardingAddress']) user_info['mailQuota'] = int(user_info['mailQuota'])/1024 return lengths, user_info def", "{'key': key, 'val': val} ) cgi_params += \"\"\"}) or die \"$@\";\"\"\" cgi =", "update_user_info(attrib_dict): cgi = get_perl_cgi(attrib_dict) return asbool(g.ispman.update_user(cgi)) def get_user_attribute_values(id, domain, attribute): return to_unicode( g.ispman.getUserAttributeValues(id,", "sort_ascending: decorated.reverse() result = [dict_ for (key, dict_) in decorated] return lengths, result", "lengths[uid]['forwards'] = 
len(user_info['mailForwardingAddress']) user_info['mailQuota'] = int(user_info['mailQuota'])/1024 return lengths, user_info def get_perl_cgi(params_dict): attrib_tpl =", "def address_exists_on_domain(domain, address): users = get_domain_users( domain, [ \"ispmanUserId\", \"mailAlias\", \"mailLocalAddress\", #\"mailForwardingAddress\" ]", "decorated = [(dict_[sort], dict_) for dict_ in entries] decorated.sort() if not ascending: decorated.reverse()", "get_perl_cgi(post_dict) return asbool(g.ispman.deleteUser(cgi)) def user_exists(user_id): uid = user_id + '@' + request.POST['ispmanDomain'] return", "= int(user_info['mailQuota'])/1024 return lengths, user_info def get_perl_cgi(params_dict): attrib_tpl = \"\"\" '%(key)s' => ['%(val)s'],", "= {} # Aparently Genshi converts what it can to strings, # we", "let's save some time and return right away if we don't need any", "result = [dict_ for (key, dict_) in decorated] return lengths, result def get_user_info(uid,", "dict_) in decorated] return lengths, result def get_user_info(uid, domain): user_info = to_unicode(g.ispman.getUserInfo(uid +", "# Aparently Genshi converts what it can to strings, # we have to", "= ( 'ispmanStatus', 'mailQuota', 'mailAlias', 'sn', 'userPassword', 'givenName', 'updateUser', 'uid', 'mailForwardingAddress', 'ispmanDomain', 'FTPQuotaMBytes',", "<<EMAIL>> # # Please view LICENSE for additional licensing information. 
# ============================================================================= from", "val} ) cgi_params += \"\"\"}) or die \"$@\";\"\"\" cgi = g.perl.eval(cgi_params) g.perl.eval('$q->header(-charset =>", "return to_unicode(g.ispman.getVhostCount(domain)) def get_domain_user_count(domain): return to_unicode(g.ispman.getUserCount(domain)) # cache it for 1 hour @beaker_cache(expire=3600,", "else: cgi_params += attrib_tpl % ( {'key': key, 'val': val} ) cgi_params +=", "dn entries.append(vals) if len(entries) <= 1: return entries decorated = [(dict_[sort], dict_) for", "asbool(g.ispman.deleteUser(cgi)) def user_exists(user_id): uid = user_id + '@' + request.POST['ispmanDomain'] return bool(int(g.ispman.userExists(uid))) #", "@perlexcept def update_user_info(attrib_dict): cgi = get_perl_cgi(attrib_dict) return asbool(g.ispman.update_user(cgi)) def get_user_attribute_values(id, domain, attribute): return", "len(user_info['mailForwardingAddress']) user_info['mailQuota'] = int(user_info['mailQuota'])/1024 return lengths, user_info def get_perl_cgi(params_dict): attrib_tpl = \"\"\" '%(key)s'", "join from formencode.variabledecode import variable_decode from pylons import request, g, cache from pylons.decorators.cache", "lengths[uid] = {} if 'mailAlias' in user_info: lengths[uid]['aliases'] = len(user_info['mailAlias']) if 'mailForwardingAddress' in", "def get_domain_info(domain): return to_unicode(dict( g.ispman.getDomainInfo(domain, 2)) ) def get_domain_vhost_count(domain): return to_unicode(g.ispman.getVhostCount(domain)) def get_domain_user_count(domain):", "+= attrib_tpl % ( {'key': key, 'val': val} ) cgi_params += \"\"\"}) or", "\"\"\" '%(key)s' => ['%(val)s'], \"\"\" params_dict = variable_decode(params_dict) cgi_params = \"$q = new", "Genshi converts what it can to strings, # we have to make these", "'mailAlias', 'sn', 'mailLocalAddress', 'userPassword', 'mailForwardingAddress', 'givenName') updatable_attributes = ( 'ispmanStatus', 'mailQuota', 'mailAlias', 'sn',", 
"\"\"\" params_dict = variable_decode(params_dict) cgi_params = \"$q = new CGI({\" for key, val", "beaker_cache from ispmanccp.lib.helpers import to_unicode, asbool from ispmanccp.lib.decorators import perlexcept APP_CONF = g.pylons_config.app_conf", "in userlist] decorated.sort() if not sort_ascending: decorated.reverse() result = [dict_ for (key, dict_)", "cgi @perlexcept def update_user_info(attrib_dict): cgi = get_perl_cgi(attrib_dict) return asbool(g.ispman.update_user(cgi)) def get_user_attribute_values(id, domain, attribute):", "return lengths, user_info def get_perl_cgi(params_dict): attrib_tpl = \"\"\" '%(key)s' => ['%(val)s'], \"\"\" params_dict", "additional licensing information. # ============================================================================= from string import join from formencode.variabledecode import variable_decode", "return user[\"ispmanUserId\"] return None def get_users_list(domain, letter, sortby=None, sort_ascending=True): domain_users = get_domain_users( domain,", "domain, attribute): return to_unicode( g.ispman.getUserAttributeValues(id, domain, attribute) ) @perlexcept def delete_user(post_dict): cgi =", "return cache.get_cache(domain) def get_domain_users(domain, attr_list): #attributes_to_retrieve): \"\"\"Function to get the `attr_list` from all", "============================================================================= # Copyright (C) 2006 Ufsoft.org - <NAME> <<EMAIL>> # # Please view", "return None def get_users_list(domain, letter, sortby=None, sort_ascending=True): domain_users = get_domain_users( domain, [ \"dn\",", "( {'key': key, 'val': join(val)} ) else: cgi_params += attrib_tpl % ( {'key':", "return bool(int(g.ispman.userExists(uid))) # cache it for 5 minutes @beaker_cache(expire=300, query_args=True) def get_domain_info(domain): return", "$ # ============================================================================= # $URL: http://ispmanccp.ufsoft.org/svn/branches/PythonPerl/ispmanccp/lib/ispman_helpers.py 
$ # $LastChangedDate: 2006-11-27 04:12:13 +0000 (Mon,", "# Please view LICENSE for additional licensing information. # ============================================================================= from string import", "============================================================================= # $URL: http://ispmanccp.ufsoft.org/svn/branches/PythonPerl/ispmanccp/lib/ispman_helpers.py $ # $LastChangedDate: 2006-11-27 04:12:13 +0000 (Mon, 27 Nov", "[ \"dn\", \"givenName\", \"sn\", \"cn\", \"ispmanCreateTimestamp\", \"ispmanUserId\", \"mailLocalAddress\", \"mailForwardingAddress\", \"userPassword\", \"mailQuota\", \"mailAlias\", \"FTPQuotaMBytes\",", "len(userlist) <= 1: return lengths, userlist decorated = [(dict_[sortby], dict_) for dict_ in", "in user.iteritems(): if isinstance(val, list): for n in range(len(val)): if val[n] == address:", "= to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter) ) entries = [] if not results: return None", "+0000 (Mon, 27 Nov 2006) $ # $Rev: 84 $ # $LastChangedBy: s0undt3ch", "APP_CONF['ispman_ldap_base_dn'] if attrs is not None: results = to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter, attrs, scope)", "pylons.decorators.cache import beaker_cache from ispmanccp.lib.helpers import to_unicode, asbool from ispmanccp.lib.decorators import perlexcept APP_CONF", "# $LastChangedDate: 2006-11-27 04:12:13 +0000 (Mon, 27 Nov 2006) $ # $Rev: 84", "get_user_info(uid, domain): user_info = to_unicode(g.ispman.getUserInfo(uid + '@' + domain, domain)) lengths = {}", "def get_domain_vhost_count(domain): return to_unicode(g.ispman.getVhostCount(domain)) def get_domain_user_count(domain): return to_unicode(g.ispman.getUserCount(domain)) # cache it for 1", "in user: lengths[user_id]['forwards'] = len(user['mailForwardingAddress']) if letter == 'All' or user_id.upper().startswith(letter): userlist.append(user) #", "time and return right away if we don't need any sorting if len(userlist)", "APP_CONF = g.pylons_config.app_conf 
ispman_cache = cache.get_cache('ispman') allowed_user_attributes = ( 'dn', 'dialupAccess', 'radiusProfileDn', 'uid',", "2)) ) def get_domain_vhost_count(domain): return to_unicode(g.ispman.getVhostCount(domain)) def get_domain_user_count(domain): return to_unicode(g.ispman.getUserCount(domain)) # cache it", "cgi_params += attrib_tpl % ( {'key': key, 'val': val} ) cgi_params += \"\"\"})", "val[n] == address: return user[\"ispmanUserId\"] elif val == address: return user[\"ispmanUserId\"] return None", "-*- # vim: sw=4 ts=4 fenc=utf-8 # ============================================================================= # $Id: ispman_helpers.py 84 2006-11-27", "to_unicode(dict( g.ispman.getDomainInfo(domain, 2)) ) def get_domain_vhost_count(domain): return to_unicode(g.ispman.getVhostCount(domain)) def get_domain_user_count(domain): return to_unicode(g.ispman.getUserCount(domain)) #", "scope) ) else: results = to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter) ) entries = [] if", "in params_dict.iteritems(): if key in updatable_attributes: if isinstance(val, list): cgi_params += attrib_tpl %", "$Id: ispman_helpers.py 84 2006-11-27 04:12:13Z s0undt3ch $ # ============================================================================= # $URL: http://ispmanccp.ufsoft.org/svn/branches/PythonPerl/ispmanccp/lib/ispman_helpers.py $", "@perlexcept def delete_user(post_dict): cgi = get_perl_cgi(post_dict) return asbool(g.ispman.deleteUser(cgi)) def user_exists(user_id): uid = user_id", "entries.append(vals) if len(entries) <= 1: return entries decorated = [(dict_[sort], dict_) for dict_", "from pylons.decorators.cache import beaker_cache from ispmanccp.lib.helpers import to_unicode, asbool from ispmanccp.lib.decorators import perlexcept", "delete_user(post_dict): cgi = get_perl_cgi(post_dict) return asbool(g.ispman.deleteUser(cgi)) def user_exists(user_id): uid = user_id + '@'", "= results[dn] vals['dn'] = dn entries.append(vals) if len(entries) <= 1: return entries 
decorated", "$LastChangedDate: 2006-11-27 04:12:13 +0000 (Mon, 27 Nov 2006) $ # $Rev: 84 $", "'ispmanDomain', 'DestinationAddress', 'DestinationPort', 'mailQuota', 'mailHost', 'fileHost', 'cn', 'mailRoutingAddress', 'FTPStatus', 'FTPQuotaMBytes', 'mailAlias', 'sn', 'mailLocalAddress',", "'mailHost', 'fileHost', 'dialupAccess', 'radiusProfileDN' ) def get_cache(domain): return cache.get_cache(domain) def get_domain_users(domain, attr_list): #attributes_to_retrieve):", "= get_perl_cgi(attrib_dict) return asbool(g.ispman.update_user(cgi)) def get_user_attribute_values(id, domain, attribute): return to_unicode( g.ispman.getUserAttributeValues(id, domain, attribute)", "# $URL: http://ispmanccp.ufsoft.org/svn/branches/PythonPerl/ispmanccp/lib/ispman_helpers.py $ # $LastChangedDate: 2006-11-27 04:12:13 +0000 (Mon, 27 Nov 2006)", "============================================================================= from string import join from formencode.variabledecode import variable_decode from pylons import request,", "request.POST['ispmanDomain'] return bool(int(g.ispman.userExists(uid))) # cache it for 5 minutes @beaker_cache(expire=300, query_args=True) def get_domain_info(domain):", "lengths, user_info def get_perl_cgi(params_dict): attrib_tpl = \"\"\" '%(key)s' => ['%(val)s'], \"\"\" params_dict =", "'sn', 'userPassword', 'givenName', 'updateUser', 'uid', 'mailForwardingAddress', 'ispmanDomain', 'FTPQuotaMBytes', 'FTPStatus', 'mailHost', 'fileHost', 'dialupAccess', 'radiusProfileDN'", "get_cache(domain): return cache.get_cache(domain) def get_domain_users(domain, attr_list): #attributes_to_retrieve): \"\"\"Function to get the `attr_list` from", "= to_unicode(g.ispman.getUsers(domain, attr_list)) decorated = [(dict_['ispmanUserId'], dict_) for dict_ in userlist.values()] decorated.sort() result", "ascending=True): base = APP_CONF['ispman_ldap_base_dn'] if attrs is not None: results = to_unicode( g.ispman.getEntriesAsHashRef(base,", "if attrs is not None: results = 
to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter, attrs, scope) )", "ispman_helpers.py 84 2006-11-27 04:12:13Z s0undt3ch $ # ============================================================================= # $URL: http://ispmanccp.ufsoft.org/svn/branches/PythonPerl/ispmanccp/lib/ispman_helpers.py $ #", "#\"mailForwardingAddress\" ] ) for user in users: for key, val, in user.iteritems(): if", "= {} lengths[uid] = {} if 'mailAlias' in user_info: lengths[uid]['aliases'] = len(user_info['mailAlias']) if", "\"\"\"}) or die \"$@\";\"\"\" cgi = g.perl.eval(cgi_params) g.perl.eval('$q->header(-charset => \"UTF-8\");') return cgi @perlexcept", "dict_ in entries] decorated.sort() if not ascending: decorated.reverse() result = [dict_ for (key,", "decorated] return lengths, result def get_user_info(uid, domain): user_info = to_unicode(g.ispman.getUserInfo(uid + '@' +", "return to_unicode( g.ispman.getUserAttributeValues(id, domain, attribute) ) @perlexcept def delete_user(post_dict): cgi = get_perl_cgi(post_dict) return", "results: vals = results[dn] vals['dn'] = dn entries.append(vals) if len(entries) <= 1: return", "\"ispmanUserId\", \"mailAlias\", \"mailLocalAddress\", #\"mailForwardingAddress\" ] ) for user in users: for key, val,", "fenc=utf-8 # ============================================================================= # $Id: ispman_helpers.py 84 2006-11-27 04:12:13Z s0undt3ch $ # =============================================================================", "user_info['mailQuota'] = int(user_info['mailQuota'])/1024 return lengths, user_info def get_perl_cgi(params_dict): attrib_tpl = \"\"\" '%(key)s' =>", ") def get_domain_vhost_count(domain): return to_unicode(g.ispman.getVhostCount(domain)) def get_domain_user_count(domain): return to_unicode(g.ispman.getUserCount(domain)) # cache it for", "def get_user_attribute_values(id, domain, attribute): return to_unicode( g.ispman.getUserAttributeValues(id, domain, attribute) ) @perlexcept def 
delete_user(post_dict):", "cache from pylons.decorators.cache import beaker_cache from ispmanccp.lib.helpers import to_unicode, asbool from ispmanccp.lib.decorators import", "to strings, # we have to make these lists if 'mailAlias' in user:", "userlist = [] lengths = {} for user in domain_users: user_id = user['ispmanUserId']", "[dict_ for (key, dict_) in decorated] return lengths, result def get_user_info(uid, domain): user_info", "domain)) lengths = {} lengths[uid] = {} if 'mailAlias' in user_info: lengths[uid]['aliases'] =", "user_id + '@' + request.POST['ispmanDomain'] return bool(int(g.ispman.userExists(uid))) # cache it for 5 minutes", "Please view LICENSE for additional licensing information. # ============================================================================= from string import join", "\"mailLocalAddress\", #\"mailForwardingAddress\" ] ) for user in users: for key, val, in user.iteritems():", "userlist.values()] decorated.sort() result = [dict_ for (key, dict_) in decorated] return result def", "\"mailLocalAddress\", \"mailForwardingAddress\", \"userPassword\", \"mailQuota\", \"mailAlias\", \"FTPQuotaMBytes\", \"FTPStatus\" ] ) userlist = [] lengths", "[(dict_[sortby], dict_) for dict_ in userlist] decorated.sort() if not sort_ascending: decorated.reverse() result =", "+ '@' + domain, domain)) lengths = {} lengths[uid] = {} if 'mailAlias'", "get_default_acount_vars(): defaults = {} defaults['defaultUserFtpQuota'] = to_unicode( g.ispman.getConf('defaultUserFtpQuota') ) defaults['defaultUserMailQuota'] = to_unicode( g.ispman.getConf('defaultUserMailQuota')", "or user_id.upper().startswith(letter): userlist.append(user) # let's save some time and return right away if", ") def get_cache(domain): return cache.get_cache(domain) def get_domain_users(domain, attr_list): #attributes_to_retrieve): \"\"\"Function to get the", "for additional licensing information. 
# ============================================================================= from string import join from formencode.variabledecode import", "for dict_ in userlist] decorated.sort() if not sort_ascending: decorated.reverse() result = [dict_ for", "if len(entries) <= 1: return entries decorated = [(dict_[sort], dict_) for dict_ in", "get_domain_users(domain, attr_list): #attributes_to_retrieve): \"\"\"Function to get the `attr_list` from all users on `domain`\"\"\"", "need any sorting if len(userlist) <= 1: return lengths, userlist decorated = [(dict_[sortby],", "\"dn\", \"givenName\", \"sn\", \"cn\", \"ispmanCreateTimestamp\", \"ispmanUserId\", \"mailLocalAddress\", \"mailForwardingAddress\", \"userPassword\", \"mailQuota\", \"mailAlias\", \"FTPQuotaMBytes\", \"FTPStatus\"", "attrib_tpl % ( {'key': key, 'val': join(val)} ) else: cgi_params += attrib_tpl %", "dict_) for dict_ in entries] decorated.sort() if not ascending: decorated.reverse() result = [dict_", "= len(user_info['mailForwardingAddress']) user_info['mailQuota'] = int(user_info['mailQuota'])/1024 return lengths, user_info def get_perl_cgi(params_dict): attrib_tpl = \"\"\"", "'ispmanDomain', 'FTPQuotaMBytes', 'FTPStatus', 'mailHost', 'fileHost', 'dialupAccess', 'radiusProfileDN' ) def get_cache(domain): return cache.get_cache(domain) def", "decorated = [(dict_['ispmanUserId'], dict_) for dict_ in userlist.values()] decorated.sort() result = [dict_ for", "query_args=True) def get_default_acount_vars(): defaults = {} defaults['defaultUserFtpQuota'] = to_unicode( g.ispman.getConf('defaultUserFtpQuota') ) defaults['defaultUserMailQuota'] =", "users = get_domain_users( domain, [ \"ispmanUserId\", \"mailAlias\", \"mailLocalAddress\", #\"mailForwardingAddress\" ] ) for user", "from all users on `domain`\"\"\" if attr_list.count('ispmanUserId') < 1: attr_list.append('ispmanUserId') userlist = to_unicode(g.ispman.getUsers(domain,", "#attributes_to_retrieve): \"\"\"Function to get the `attr_list` from all users 
on `domain`\"\"\" if attr_list.count('ispmanUserId')", "user_info: lengths[uid]['forwards'] = len(user_info['mailForwardingAddress']) user_info['mailQuota'] = int(user_info['mailQuota'])/1024 return lengths, user_info def get_perl_cgi(params_dict): attrib_tpl", "if 'mailForwardingAddress' in user_info: lengths[uid]['forwards'] = len(user_info['mailForwardingAddress']) user_info['mailQuota'] = int(user_info['mailQuota'])/1024 return lengths, user_info", "cgi = g.perl.eval(cgi_params) g.perl.eval('$q->header(-charset => \"UTF-8\");') return cgi @perlexcept def update_user_info(attrib_dict): cgi =", "{} defaults['defaultUserFtpQuota'] = to_unicode( g.ispman.getConf('defaultUserFtpQuota') ) defaults['defaultUserMailQuota'] = to_unicode( g.ispman.getConf('defaultUserMailQuota') ) return defaults", "return asbool(g.ispman.deleteUser(cgi)) def user_exists(user_id): uid = user_id + '@' + request.POST['ispmanDomain'] return bool(int(g.ispman.userExists(uid)))", "query_args=True) def get_domain_info(domain): return to_unicode(dict( g.ispman.getDomainInfo(domain, 2)) ) def get_domain_vhost_count(domain): return to_unicode(g.ispman.getVhostCount(domain)) def", "= get_perl_cgi(attrib_dict) return g.ispman.addUser(cgi) def ldap_search(ldap_filter=\"objectClass=*\", attrs=None, scope=\"sub\", sort='ispmanUserId', ascending=True): base = APP_CONF['ispman_ldap_base_dn']", "import join from formencode.variabledecode import variable_decode from pylons import request, g, cache from", "27 Nov 2006) $ # $Rev: 84 $ # $LastChangedBy: s0undt3ch $ #", "[dict_ for (key, dict_) in decorated] return result def address_exists_on_domain(domain, address): users =", "elif val == address: return user[\"ispmanUserId\"] return None def get_users_list(domain, letter, sortby=None, sort_ascending=True):", "\"\"\"Function to get the `attr_list` from all users on `domain`\"\"\" if attr_list.count('ispmanUserId') <", "return to_unicode(dict( g.ispman.getDomainInfo(domain, 2)) ) def 
get_domain_vhost_count(domain): return to_unicode(g.ispman.getVhostCount(domain)) def get_domain_user_count(domain): return to_unicode(g.ispman.getUserCount(domain))", "'FTPStatus', 'mailHost', 'fileHost', 'dialupAccess', 'radiusProfileDN' ) def get_cache(domain): return cache.get_cache(domain) def get_domain_users(domain, attr_list):", "(key, dict_) in decorated] return lengths, result def get_user_info(uid, domain): user_info = to_unicode(g.ispman.getUserInfo(uid", "in user_info: lengths[uid]['forwards'] = len(user_info['mailForwardingAddress']) user_info['mailQuota'] = int(user_info['mailQuota'])/1024 return lengths, user_info def get_perl_cgi(params_dict):", "def delete_user(post_dict): cgi = get_perl_cgi(post_dict) return asbool(g.ispman.deleteUser(cgi)) def user_exists(user_id): uid = user_id +", "in entries] decorated.sort() if not ascending: decorated.reverse() result = [dict_ for (key, dict_)", "decorated.sort() result = [dict_ for (key, dict_) in decorated] return result def address_exists_on_domain(domain,", "@perlexcept def add_user(attrib_dict): cgi = get_perl_cgi(attrib_dict) return g.ispman.addUser(cgi) def ldap_search(ldap_filter=\"objectClass=*\", attrs=None, scope=\"sub\", sort='ispmanUserId',", "# $Id: ispman_helpers.py 84 2006-11-27 04:12:13Z s0undt3ch $ # ============================================================================= # $URL: http://ispmanccp.ufsoft.org/svn/branches/PythonPerl/ispmanccp/lib/ispman_helpers.py", "'mailAlias' in user: lengths[user_id]['aliases'] = len(user['mailAlias']) if 'mailForwardingAddress' in user: lengths[user_id]['forwards'] = len(user['mailForwardingAddress'])", "if we don't need any sorting if len(userlist) <= 1: return lengths, userlist", "attrs=None, scope=\"sub\", sort='ispmanUserId', ascending=True): base = APP_CONF['ispman_ldap_base_dn'] if attrs is not None: results", "if not ascending: decorated.reverse() result = [dict_ for (key, dict_) in decorated] return", "lengths[uid]['aliases'] = 
len(user_info['mailAlias']) if 'mailForwardingAddress' in user_info: lengths[uid]['forwards'] = len(user_info['mailForwardingAddress']) user_info['mailQuota'] = int(user_info['mailQuota'])/1024", "attrib_tpl = \"\"\" '%(key)s' => ['%(val)s'], \"\"\" params_dict = variable_decode(params_dict) cgi_params = \"$q", "to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter) ) entries = [] if not results: return None for", "( {'key': key, 'val': val} ) cgi_params += \"\"\"}) or die \"$@\";\"\"\" cgi", "away if we don't need any sorting if len(userlist) <= 1: return lengths,", "def get_perl_cgi(params_dict): attrib_tpl = \"\"\" '%(key)s' => ['%(val)s'], \"\"\" params_dict = variable_decode(params_dict) cgi_params", "to_unicode(g.ispman.getUserCount(domain)) # cache it for 1 hour @beaker_cache(expire=3600, query_args=True) def get_default_acount_vars(): defaults =", "CGI({\" for key, val in params_dict.iteritems(): if key in updatable_attributes: if isinstance(val, list):", "g.ispman.getUserAttributeValues(id, domain, attribute) ) @perlexcept def delete_user(post_dict): cgi = get_perl_cgi(post_dict) return asbool(g.ispman.deleteUser(cgi)) def", "base = APP_CONF['ispman_ldap_base_dn'] if attrs is not None: results = to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter,", "'@' + request.POST['ispmanDomain'] return bool(int(g.ispman.userExists(uid))) # cache it for 5 minutes @beaker_cache(expire=300, query_args=True)", "'gidNumber', 'homeDirectory', 'loginShell', 'ispmanStatus', 'ispmanCreateTimestamp', 'ispmanUserId', 'ispmanDomain', 'DestinationAddress', 'DestinationPort', 'mailQuota', 'mailHost', 'fileHost', 'cn',", "04:12:13 +0000 (Mon, 27 Nov 2006) $ # $Rev: 84 $ # $LastChangedBy:", "to_unicode(g.ispman.getUserInfo(uid + '@' + domain, domain)) lengths = {} lengths[uid] = {} if", "'mailForwardingAddress' in user: lengths[user_id]['forwards'] = len(user['mailForwardingAddress']) if letter == 'All' or user_id.upper().startswith(letter): userlist.append(user)", "# 
============================================================================= # Copyright (C) 2006 Ufsoft.org - <NAME> <<EMAIL>> # # Please", "\"$@\";\"\"\" cgi = g.perl.eval(cgi_params) g.perl.eval('$q->header(-charset => \"UTF-8\");') return cgi @perlexcept def update_user_info(attrib_dict): cgi", "- <NAME> <<EMAIL>> # # Please view LICENSE for additional licensing information. #", "2006 Ufsoft.org - <NAME> <<EMAIL>> # # Please view LICENSE for additional licensing", "for 5 minutes @beaker_cache(expire=300, query_args=True) def get_domain_info(domain): return to_unicode(dict( g.ispman.getDomainInfo(domain, 2)) ) def", "# cache it for 1 hour @beaker_cache(expire=3600, query_args=True) def get_default_acount_vars(): defaults = {}", "vals = results[dn] vals['dn'] = dn entries.append(vals) if len(entries) <= 1: return entries", "key, 'val': join(val)} ) else: cgi_params += attrib_tpl % ( {'key': key, 'val':", "key, val in params_dict.iteritems(): if key in updatable_attributes: if isinstance(val, list): cgi_params +=", "= to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter, attrs, scope) ) else: results = to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter)", "in updatable_attributes: if isinstance(val, list): cgi_params += attrib_tpl % ( {'key': key, 'val':", "= to_unicode( g.ispman.getConf('defaultUserFtpQuota') ) defaults['defaultUserMailQuota'] = to_unicode( g.ispman.getConf('defaultUserMailQuota') ) return defaults @perlexcept def", "user_info: lengths[uid]['aliases'] = len(user_info['mailAlias']) if 'mailForwardingAddress' in user_info: lengths[uid]['forwards'] = len(user_info['mailForwardingAddress']) user_info['mailQuota'] =", "def ldap_search(ldap_filter=\"objectClass=*\", attrs=None, scope=\"sub\", sort='ispmanUserId', ascending=True): base = APP_CONF['ispman_ldap_base_dn'] if attrs is not", "result def address_exists_on_domain(domain, address): users = get_domain_users( domain, [ \"ispmanUserId\", \"mailAlias\", \"mailLocalAddress\", 
#\"mailForwardingAddress\"", "import variable_decode from pylons import request, g, cache from pylons.decorators.cache import beaker_cache from", "in users: for key, val, in user.iteritems(): if isinstance(val, list): for n in", "= [] if not results: return None for dn in results: vals =", "entries = [] if not results: return None for dn in results: vals", "=> \"UTF-8\");') return cgi @perlexcept def update_user_info(attrib_dict): cgi = get_perl_cgi(attrib_dict) return asbool(g.ispman.update_user(cgi)) def", "params_dict.iteritems(): if key in updatable_attributes: if isinstance(val, list): cgi_params += attrib_tpl % (", "s0undt3ch $ # ============================================================================= # Copyright (C) 2006 Ufsoft.org - <NAME> <<EMAIL>> #", "LICENSE for additional licensing information. # ============================================================================= from string import join from formencode.variabledecode", "def user_exists(user_id): uid = user_id + '@' + request.POST['ispmanDomain'] return bool(int(g.ispman.userExists(uid))) # cache", "'mailForwardingAddress', 'ispmanDomain', 'FTPQuotaMBytes', 'FTPStatus', 'mailHost', 'fileHost', 'dialupAccess', 'radiusProfileDN' ) def get_cache(domain): return cache.get_cache(domain)", "not sort_ascending: decorated.reverse() result = [dict_ for (key, dict_) in decorated] return lengths,", "get_user_attribute_values(id, domain, attribute): return to_unicode( g.ispman.getUserAttributeValues(id, domain, attribute) ) @perlexcept def delete_user(post_dict): cgi", "(Mon, 27 Nov 2006) $ # $Rev: 84 $ # $LastChangedBy: s0undt3ch $", "letter, sortby=None, sort_ascending=True): domain_users = get_domain_users( domain, [ \"dn\", \"givenName\", \"sn\", \"cn\", \"ispmanCreateTimestamp\",", "@beaker_cache(expire=300, query_args=True) def get_domain_info(domain): return to_unicode(dict( g.ispman.getDomainInfo(domain, 2)) ) def get_domain_vhost_count(domain): return 
to_unicode(g.ispman.getVhostCount(domain))", "= cache.get_cache('ispman') allowed_user_attributes = ( 'dn', 'dialupAccess', 'radiusProfileDn', 'uid', 'uidNumber', 'gidNumber', 'homeDirectory', 'loginShell',", "len(user['mailAlias']) if 'mailForwardingAddress' in user: lengths[user_id]['forwards'] = len(user['mailForwardingAddress']) if letter == 'All' or", "g.ispman.getDomainInfo(domain, 2)) ) def get_domain_vhost_count(domain): return to_unicode(g.ispman.getVhostCount(domain)) def get_domain_user_count(domain): return to_unicode(g.ispman.getUserCount(domain)) # cache", "attr_list)) decorated = [(dict_['ispmanUserId'], dict_) for dict_ in userlist.values()] decorated.sort() result = [dict_", "Ufsoft.org - <NAME> <<EMAIL>> # # Please view LICENSE for additional licensing information.", "domain, domain)) lengths = {} lengths[uid] = {} if 'mailAlias' in user_info: lengths[uid]['aliases']", "g, cache from pylons.decorators.cache import beaker_cache from ispmanccp.lib.helpers import to_unicode, asbool from ispmanccp.lib.decorators", "user['ispmanUserId'] lengths[user_id] = {} # Aparently Genshi converts what it can to strings,", "\"mailQuota\", \"mailAlias\", \"FTPQuotaMBytes\", \"FTPStatus\" ] ) userlist = [] lengths = {} for", "domain, [ \"ispmanUserId\", \"mailAlias\", \"mailLocalAddress\", #\"mailForwardingAddress\" ] ) for user in users: for", "user_exists(user_id): uid = user_id + '@' + request.POST['ispmanDomain'] return bool(int(g.ispman.userExists(uid))) # cache it", "= {} defaults['defaultUserFtpQuota'] = to_unicode( g.ispman.getConf('defaultUserFtpQuota') ) defaults['defaultUserMailQuota'] = to_unicode( g.ispman.getConf('defaultUserMailQuota') ) return", "1: attr_list.append('ispmanUserId') userlist = to_unicode(g.ispman.getUsers(domain, attr_list)) decorated = [(dict_['ispmanUserId'], dict_) for dict_ in", ") else: cgi_params += attrib_tpl % ( {'key': key, 'val': val} ) cgi_params", "to_unicode(g.ispman.getVhostCount(domain)) def 
get_domain_user_count(domain): return to_unicode(g.ispman.getUserCount(domain)) # cache it for 1 hour @beaker_cache(expire=3600, query_args=True)", "'ispmanCreateTimestamp', 'ispmanUserId', 'ispmanDomain', 'DestinationAddress', 'DestinationPort', 'mailQuota', 'mailHost', 'fileHost', 'cn', 'mailRoutingAddress', 'FTPStatus', 'FTPQuotaMBytes', 'mailAlias',", "cache it for 5 minutes @beaker_cache(expire=300, query_args=True) def get_domain_info(domain): return to_unicode(dict( g.ispman.getDomainInfo(domain, 2))", "\"$q = new CGI({\" for key, val in params_dict.iteritems(): if key in updatable_attributes:", "get_domain_users( domain, [ \"ispmanUserId\", \"mailAlias\", \"mailLocalAddress\", #\"mailForwardingAddress\" ] ) for user in users:", "lengths = {} lengths[uid] = {} if 'mailAlias' in user_info: lengths[uid]['aliases'] = len(user_info['mailAlias'])", "= get_perl_cgi(post_dict) return asbool(g.ispman.deleteUser(cgi)) def user_exists(user_id): uid = user_id + '@' + request.POST['ispmanDomain']", "if 'mailForwardingAddress' in user: lengths[user_id]['forwards'] = len(user['mailForwardingAddress']) if letter == 'All' or user_id.upper().startswith(letter):", "for key, val, in user.iteritems(): if isinstance(val, list): for n in range(len(val)): if", "from string import join from formencode.variabledecode import variable_decode from pylons import request, g,", "to_unicode(g.ispman.getUsers(domain, attr_list)) decorated = [(dict_['ispmanUserId'], dict_) for dict_ in userlist.values()] decorated.sort() result =", "\"mailForwardingAddress\", \"userPassword\", \"mailQuota\", \"mailAlias\", \"FTPQuotaMBytes\", \"FTPStatus\" ] ) userlist = [] lengths =", "if attr_list.count('ispmanUserId') < 1: attr_list.append('ispmanUserId') userlist = to_unicode(g.ispman.getUsers(domain, attr_list)) decorated = [(dict_['ispmanUserId'], dict_)", "key, 'val': val} ) cgi_params += \"\"\"}) or die \"$@\";\"\"\" cgi = g.perl.eval(cgi_params)", "results: return None for dn in results: vals = 
results[dn] vals['dn'] = dn", "return asbool(g.ispman.update_user(cgi)) def get_user_attribute_values(id, domain, attribute): return to_unicode( g.ispman.getUserAttributeValues(id, domain, attribute) ) @perlexcept", "'dialupAccess', 'radiusProfileDN' ) def get_cache(domain): return cache.get_cache(domain) def get_domain_users(domain, attr_list): #attributes_to_retrieve): \"\"\"Function to", "die \"$@\";\"\"\" cgi = g.perl.eval(cgi_params) g.perl.eval('$q->header(-charset => \"UTF-8\");') return cgi @perlexcept def update_user_info(attrib_dict):", "'loginShell', 'ispmanStatus', 'ispmanCreateTimestamp', 'ispmanUserId', 'ispmanDomain', 'DestinationAddress', 'DestinationPort', 'mailQuota', 'mailHost', 'fileHost', 'cn', 'mailRoutingAddress', 'FTPStatus',", "attrs is not None: results = to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter, attrs, scope) ) else:", "formencode.variabledecode import variable_decode from pylons import request, g, cache from pylons.decorators.cache import beaker_cache", "get_perl_cgi(attrib_dict) return asbool(g.ispman.update_user(cgi)) def get_user_attribute_values(id, domain, attribute): return to_unicode( g.ispman.getUserAttributeValues(id, domain, attribute) )", "return lengths, userlist decorated = [(dict_[sortby], dict_) for dict_ in userlist] decorated.sort() if", "{'key': key, 'val': join(val)} ) else: cgi_params += attrib_tpl % ( {'key': key,", "users: for key, val, in user.iteritems(): if isinstance(val, list): for n in range(len(val)):", "attrib_tpl % ( {'key': key, 'val': val} ) cgi_params += \"\"\"}) or die", "'homeDirectory', 'loginShell', 'ispmanStatus', 'ispmanCreateTimestamp', 'ispmanUserId', 'ispmanDomain', 'DestinationAddress', 'DestinationPort', 'mailQuota', 'mailHost', 'fileHost', 'cn', 'mailRoutingAddress',", "# we have to make these lists if 'mailAlias' in user: lengths[user_id]['aliases'] =", "= {} if 'mailAlias' in user_info: lengths[uid]['aliases'] = len(user_info['mailAlias']) if 'mailForwardingAddress' in 
user_info:", "request, g, cache from pylons.decorators.cache import beaker_cache from ispmanccp.lib.helpers import to_unicode, asbool from", "cgi = get_perl_cgi(post_dict) return asbool(g.ispman.deleteUser(cgi)) def user_exists(user_id): uid = user_id + '@' +", "scope=\"sub\", sort='ispmanUserId', ascending=True): base = APP_CONF['ispman_ldap_base_dn'] if attrs is not None: results =", "decorated] return result def address_exists_on_domain(domain, address): users = get_domain_users( domain, [ \"ispmanUserId\", \"mailAlias\",", ") for user in users: for key, val, in user.iteritems(): if isinstance(val, list):", "user_info = to_unicode(g.ispman.getUserInfo(uid + '@' + domain, domain)) lengths = {} lengths[uid] =", "for 1 hour @beaker_cache(expire=3600, query_args=True) def get_default_acount_vars(): defaults = {} defaults['defaultUserFtpQuota'] = to_unicode(", "have to make these lists if 'mailAlias' in user: lengths[user_id]['aliases'] = len(user['mailAlias']) if", "sorting if len(userlist) <= 1: return lengths, userlist decorated = [(dict_[sortby], dict_) for", "ispmanccp.lib.decorators import perlexcept APP_CONF = g.pylons_config.app_conf ispman_cache = cache.get_cache('ispman') allowed_user_attributes = ( 'dn',", "updatable_attributes = ( 'ispmanStatus', 'mailQuota', 'mailAlias', 'sn', 'userPassword', 'givenName', 'updateUser', 'uid', 'mailForwardingAddress', 'ispmanDomain',", "dict_) for dict_ in userlist.values()] decorated.sort() result = [dict_ for (key, dict_) in", "'fileHost', 'dialupAccess', 'radiusProfileDN' ) def get_cache(domain): return cache.get_cache(domain) def get_domain_users(domain, attr_list): #attributes_to_retrieve): \"\"\"Function", "'givenName') updatable_attributes = ( 'ispmanStatus', 'mailQuota', 'mailAlias', 'sn', 'userPassword', 'givenName', 'updateUser', 'uid', 'mailForwardingAddress',", "'userPassword', 'givenName', 'updateUser', 'uid', 'mailForwardingAddress', 'ispmanDomain', 'FTPQuotaMBytes', 'FTPStatus', 'mailHost', 'fileHost', 
'dialupAccess', 'radiusProfileDN' )", "# let's save some time and return right away if we don't need", "return right away if we don't need any sorting if len(userlist) <= 1:", "decorated.sort() if not sort_ascending: decorated.reverse() result = [dict_ for (key, dict_) in decorated]", "we have to make these lists if 'mailAlias' in user: lengths[user_id]['aliases'] = len(user['mailAlias'])", "5 minutes @beaker_cache(expire=300, query_args=True) def get_domain_info(domain): return to_unicode(dict( g.ispman.getDomainInfo(domain, 2)) ) def get_domain_vhost_count(domain):", "some time and return right away if we don't need any sorting if", "[] lengths = {} for user in domain_users: user_id = user['ispmanUserId'] lengths[user_id] =", "= to_unicode( g.ispman.getConf('defaultUserMailQuota') ) return defaults @perlexcept def add_user(attrib_dict): cgi = get_perl_cgi(attrib_dict) return", "for dict_ in userlist.values()] decorated.sort() result = [dict_ for (key, dict_) in decorated]", "key, val, in user.iteritems(): if isinstance(val, list): for n in range(len(val)): if val[n]", "defaults['defaultUserFtpQuota'] = to_unicode( g.ispman.getConf('defaultUserFtpQuota') ) defaults['defaultUserMailQuota'] = to_unicode( g.ispman.getConf('defaultUserMailQuota') ) return defaults @perlexcept", "for user in users: for key, val, in user.iteritems(): if isinstance(val, list): for", "= get_domain_users( domain, [ \"dn\", \"givenName\", \"sn\", \"cn\", \"ispmanCreateTimestamp\", \"ispmanUserId\", \"mailLocalAddress\", \"mailForwardingAddress\", \"userPassword\",", "minutes @beaker_cache(expire=300, query_args=True) def get_domain_info(domain): return to_unicode(dict( g.ispman.getDomainInfo(domain, 2)) ) def get_domain_vhost_count(domain): return", "'userPassword', 'mailForwardingAddress', 'givenName') updatable_attributes = ( 'ispmanStatus', 'mailQuota', 'mailAlias', 'sn', 'userPassword', 'givenName', 'updateUser',", "lengths = {} for user in domain_users: user_id = user['ispmanUserId'] 
lengths[user_id] = {}", "user in users: for key, val, in user.iteritems(): if isinstance(val, list): for n", "= ( 'dn', 'dialupAccess', 'radiusProfileDn', 'uid', 'uidNumber', 'gidNumber', 'homeDirectory', 'loginShell', 'ispmanStatus', 'ispmanCreateTimestamp', 'ispmanUserId',", "in decorated] return result def address_exists_on_domain(domain, address): users = get_domain_users( domain, [ \"ispmanUserId\",", "ldap_filter, attrs, scope) ) else: results = to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter) ) entries =", "= [(dict_[sort], dict_) for dict_ in entries] decorated.sort() if not ascending: decorated.reverse() result", "cgi = get_perl_cgi(attrib_dict) return g.ispman.addUser(cgi) def ldap_search(ldap_filter=\"objectClass=*\", attrs=None, scope=\"sub\", sort='ispmanUserId', ascending=True): base =", "right away if we don't need any sorting if len(userlist) <= 1: return", "'mailAlias', 'sn', 'userPassword', 'givenName', 'updateUser', 'uid', 'mailForwardingAddress', 'ispmanDomain', 'FTPQuotaMBytes', 'FTPStatus', 'mailHost', 'fileHost', 'dialupAccess',", "these lists if 'mailAlias' in user: lengths[user_id]['aliases'] = len(user['mailAlias']) if 'mailForwardingAddress' in user:", "sort_ascending=True): domain_users = get_domain_users( domain, [ \"dn\", \"givenName\", \"sn\", \"cn\", \"ispmanCreateTimestamp\", \"ispmanUserId\", \"mailLocalAddress\",", "it can to strings, # we have to make these lists if 'mailAlias'", "Copyright (C) 2006 Ufsoft.org - <NAME> <<EMAIL>> # # Please view LICENSE for", "# vim: sw=4 ts=4 fenc=utf-8 # ============================================================================= # $Id: ispman_helpers.py 84 2006-11-27 04:12:13Z", "84 2006-11-27 04:12:13Z s0undt3ch $ # ============================================================================= # $URL: http://ispmanccp.ufsoft.org/svn/branches/PythonPerl/ispmanccp/lib/ispman_helpers.py $ # $LastChangedDate:", "% ( {'key': key, 'val': val} ) cgi_params += \"\"\"}) or die 
\"$@\";\"\"\"", "in user: lengths[user_id]['aliases'] = len(user['mailAlias']) if 'mailForwardingAddress' in user: lengths[user_id]['forwards'] = len(user['mailForwardingAddress']) if", "entries decorated = [(dict_[sort], dict_) for dict_ in entries] decorated.sort() if not ascending:", "'radiusProfileDN' ) def get_cache(domain): return cache.get_cache(domain) def get_domain_users(domain, attr_list): #attributes_to_retrieve): \"\"\"Function to get", "all users on `domain`\"\"\" if attr_list.count('ispmanUserId') < 1: attr_list.append('ispmanUserId') userlist = to_unicode(g.ispman.getUsers(domain, attr_list))", "ispman_cache = cache.get_cache('ispman') allowed_user_attributes = ( 'dn', 'dialupAccess', 'radiusProfileDn', 'uid', 'uidNumber', 'gidNumber', 'homeDirectory',", "for dn in results: vals = results[dn] vals['dn'] = dn entries.append(vals) if len(entries)", "1: return entries decorated = [(dict_[sort], dict_) for dict_ in entries] decorated.sort() if", "import perlexcept APP_CONF = g.pylons_config.app_conf ispman_cache = cache.get_cache('ispman') allowed_user_attributes = ( 'dn', 'dialupAccess',", "any sorting if len(userlist) <= 1: return lengths, userlist decorated = [(dict_[sortby], dict_)", "\"ispmanCreateTimestamp\", \"ispmanUserId\", \"mailLocalAddress\", \"mailForwardingAddress\", \"userPassword\", \"mailQuota\", \"mailAlias\", \"FTPQuotaMBytes\", \"FTPStatus\" ] ) userlist =", ") cgi_params += \"\"\"}) or die \"$@\";\"\"\" cgi = g.perl.eval(cgi_params) g.perl.eval('$q->header(-charset => \"UTF-8\");')", "val, in user.iteritems(): if isinstance(val, list): for n in range(len(val)): if val[n] ==", "$ # ============================================================================= # Copyright (C) 2006 Ufsoft.org - <NAME> <<EMAIL>> # #", "def get_domain_user_count(domain): return to_unicode(g.ispman.getUserCount(domain)) # cache it for 1 hour @beaker_cache(expire=3600, query_args=True) def", "return cgi @perlexcept def update_user_info(attrib_dict): cgi = 
get_perl_cgi(attrib_dict) return asbool(g.ispman.update_user(cgi)) def get_user_attribute_values(id, domain,", "if not results: return None for dn in results: vals = results[dn] vals['dn']", "'val': join(val)} ) else: cgi_params += attrib_tpl % ( {'key': key, 'val': val}", "coding: utf-8 -*- # vim: sw=4 ts=4 fenc=utf-8 # ============================================================================= # $Id: ispman_helpers.py", "= [(dict_['ispmanUserId'], dict_) for dict_ in userlist.values()] decorated.sort() result = [dict_ for (key,", "get_domain_vhost_count(domain): return to_unicode(g.ispman.getVhostCount(domain)) def get_domain_user_count(domain): return to_unicode(g.ispman.getUserCount(domain)) # cache it for 1 hour", "(C) 2006 Ufsoft.org - <NAME> <<EMAIL>> # # Please view LICENSE for additional", "cgi_params += attrib_tpl % ( {'key': key, 'val': join(val)} ) else: cgi_params +=", "lengths, userlist decorated = [(dict_[sortby], dict_) for dict_ in userlist] decorated.sort() if not", "'sn', 'mailLocalAddress', 'userPassword', 'mailForwardingAddress', 'givenName') updatable_attributes = ( 'ispmanStatus', 'mailQuota', 'mailAlias', 'sn', 'userPassword',", "userlist decorated = [(dict_[sortby], dict_) for dict_ in userlist] decorated.sort() if not sort_ascending:", "to_unicode( g.ispman.getUserAttributeValues(id, domain, attribute) ) @perlexcept def delete_user(post_dict): cgi = get_perl_cgi(post_dict) return asbool(g.ispman.deleteUser(cgi))", "[(dict_[sort], dict_) for dict_ in entries] decorated.sort() if not ascending: decorated.reverse() result =", "int(user_info['mailQuota'])/1024 return lengths, user_info def get_perl_cgi(params_dict): attrib_tpl = \"\"\" '%(key)s' => ['%(val)s'], \"\"\"", "'mailQuota', 'mailHost', 'fileHost', 'cn', 'mailRoutingAddress', 'FTPStatus', 'FTPQuotaMBytes', 'mailAlias', 'sn', 'mailLocalAddress', 'userPassword', 'mailForwardingAddress', 'givenName')", "return result def address_exists_on_domain(domain, address): users = 
get_domain_users( domain, [ \"ispmanUserId\", \"mailAlias\", \"mailLocalAddress\",", "isinstance(val, list): for n in range(len(val)): if val[n] == address: return user[\"ispmanUserId\"] elif", "userlist.append(user) # let's save some time and return right away if we don't", "= \"\"\" '%(key)s' => ['%(val)s'], \"\"\" params_dict = variable_decode(params_dict) cgi_params = \"$q =", "04:12:13Z s0undt3ch $ # ============================================================================= # $URL: http://ispmanccp.ufsoft.org/svn/branches/PythonPerl/ispmanccp/lib/ispman_helpers.py $ # $LastChangedDate: 2006-11-27 04:12:13", "$LastChangedBy: s0undt3ch $ # ============================================================================= # Copyright (C) 2006 Ufsoft.org - <NAME> <<EMAIL>>", "$ # $LastChangedDate: 2006-11-27 04:12:13 +0000 (Mon, 27 Nov 2006) $ # $Rev:", "2006) $ # $Rev: 84 $ # $LastChangedBy: s0undt3ch $ # ============================================================================= #", "sortby=None, sort_ascending=True): domain_users = get_domain_users( domain, [ \"dn\", \"givenName\", \"sn\", \"cn\", \"ispmanCreateTimestamp\", \"ispmanUserId\",", "\"UTF-8\");') return cgi @perlexcept def update_user_info(attrib_dict): cgi = get_perl_cgi(attrib_dict) return asbool(g.ispman.update_user(cgi)) def get_user_attribute_values(id,", "letter == 'All' or user_id.upper().startswith(letter): userlist.append(user) # let's save some time and return", "results = to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter) ) entries = [] if not results: return", "get_users_list(domain, letter, sortby=None, sort_ascending=True): domain_users = get_domain_users( domain, [ \"dn\", \"givenName\", \"sn\", \"cn\",", "# # Please view LICENSE for additional licensing information. 
# ============================================================================= from string", "sw=4 ts=4 fenc=utf-8 # ============================================================================= # $Id: ispman_helpers.py 84 2006-11-27 04:12:13Z s0undt3ch $", "get_domain_users( domain, [ \"dn\", \"givenName\", \"sn\", \"cn\", \"ispmanCreateTimestamp\", \"ispmanUserId\", \"mailLocalAddress\", \"mailForwardingAddress\", \"userPassword\", \"mailQuota\",", "domain, attribute) ) @perlexcept def delete_user(post_dict): cgi = get_perl_cgi(post_dict) return asbool(g.ispman.deleteUser(cgi)) def user_exists(user_id):", "utf-8 -*- # vim: sw=4 ts=4 fenc=utf-8 # ============================================================================= # $Id: ispman_helpers.py 84", "'DestinationAddress', 'DestinationPort', 'mailQuota', 'mailHost', 'fileHost', 'cn', 'mailRoutingAddress', 'FTPStatus', 'FTPQuotaMBytes', 'mailAlias', 'sn', 'mailLocalAddress', 'userPassword',", "result def get_user_info(uid, domain): user_info = to_unicode(g.ispman.getUserInfo(uid + '@' + domain, domain)) lengths", "def get_user_info(uid, domain): user_info = to_unicode(g.ispman.getUserInfo(uid + '@' + domain, domain)) lengths =", "from pylons import request, g, cache from pylons.decorators.cache import beaker_cache from ispmanccp.lib.helpers import", "if isinstance(val, list): for n in range(len(val)): if val[n] == address: return user[\"ispmanUserId\"]", "not None: results = to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter, attrs, scope) ) else: results =", "user: lengths[user_id]['aliases'] = len(user['mailAlias']) if 'mailForwardingAddress' in user: lengths[user_id]['forwards'] = len(user['mailForwardingAddress']) if letter", "+ '@' + request.POST['ispmanDomain'] return bool(int(g.ispman.userExists(uid))) # cache it for 5 minutes @beaker_cache(expire=300,", "domain): user_info = to_unicode(g.ispman.getUserInfo(uid + '@' + domain, domain)) lengths = {} lengths[uid]", "= new CGI({\" for key, val in 
params_dict.iteritems(): if key in updatable_attributes: if", "return entries decorated = [(dict_[sort], dict_) for dict_ in entries] decorated.sort() if not", "\"mailAlias\", \"FTPQuotaMBytes\", \"FTPStatus\" ] ) userlist = [] lengths = {} for user", "add_user(attrib_dict): cgi = get_perl_cgi(attrib_dict) return g.ispman.addUser(cgi) def ldap_search(ldap_filter=\"objectClass=*\", attrs=None, scope=\"sub\", sort='ispmanUserId', ascending=True): base", "domain_users: user_id = user['ispmanUserId'] lengths[user_id] = {} # Aparently Genshi converts what it", "2006-11-27 04:12:13 +0000 (Mon, 27 Nov 2006) $ # $Rev: 84 $ #", "\"FTPQuotaMBytes\", \"FTPStatus\" ] ) userlist = [] lengths = {} for user in", "dn in results: vals = results[dn] vals['dn'] = dn entries.append(vals) if len(entries) <=", "<= 1: return lengths, userlist decorated = [(dict_[sortby], dict_) for dict_ in userlist]", "'radiusProfileDn', 'uid', 'uidNumber', 'gidNumber', 'homeDirectory', 'loginShell', 'ispmanStatus', 'ispmanCreateTimestamp', 'ispmanUserId', 'ispmanDomain', 'DestinationAddress', 'DestinationPort', 'mailQuota',", "= to_unicode(g.ispman.getUserInfo(uid + '@' + domain, domain)) lengths = {} lengths[uid] = {}", "<= 1: return entries decorated = [(dict_[sort], dict_) for dict_ in entries] decorated.sort()", "'updateUser', 'uid', 'mailForwardingAddress', 'ispmanDomain', 'FTPQuotaMBytes', 'FTPStatus', 'mailHost', 'fileHost', 'dialupAccess', 'radiusProfileDN' ) def get_cache(domain):", "list): for n in range(len(val)): if val[n] == address: return user[\"ispmanUserId\"] elif val", "view LICENSE for additional licensing information. 
# ============================================================================= from string import join from", "if not sort_ascending: decorated.reverse() result = [dict_ for (key, dict_) in decorated] return", "= \"$q = new CGI({\" for key, val in params_dict.iteritems(): if key in", ") else: results = to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter) ) entries = [] if not", "join(val)} ) else: cgi_params += attrib_tpl % ( {'key': key, 'val': val} )", "'cn', 'mailRoutingAddress', 'FTPStatus', 'FTPQuotaMBytes', 'mailAlias', 'sn', 'mailLocalAddress', 'userPassword', 'mailForwardingAddress', 'givenName') updatable_attributes = (", "= {} for user in domain_users: user_id = user['ispmanUserId'] lengths[user_id] = {} #", "userlist = to_unicode(g.ispman.getUsers(domain, attr_list)) decorated = [(dict_['ispmanUserId'], dict_) for dict_ in userlist.values()] decorated.sort()", "save some time and return right away if we don't need any sorting", "to make these lists if 'mailAlias' in user: lengths[user_id]['aliases'] = len(user['mailAlias']) if 'mailForwardingAddress'", "for n in range(len(val)): if val[n] == address: return user[\"ispmanUserId\"] elif val ==", "attr_list.count('ispmanUserId') < 1: attr_list.append('ispmanUserId') userlist = to_unicode(g.ispman.getUsers(domain, attr_list)) decorated = [(dict_['ispmanUserId'], dict_) for", "lengths, result def get_user_info(uid, domain): user_info = to_unicode(g.ispman.getUserInfo(uid + '@' + domain, domain))", "ispmanccp.lib.helpers import to_unicode, asbool from ispmanccp.lib.decorators import perlexcept APP_CONF = g.pylons_config.app_conf ispman_cache =", "\"givenName\", \"sn\", \"cn\", \"ispmanCreateTimestamp\", \"ispmanUserId\", \"mailLocalAddress\", \"mailForwardingAddress\", \"userPassword\", \"mailQuota\", \"mailAlias\", \"FTPQuotaMBytes\", \"FTPStatus\" ]", "range(len(val)): if val[n] == address: return user[\"ispmanUserId\"] elif val == address: return user[\"ispmanUserId\"]", "= [] lengths = {} for user 
in domain_users: user_id = user['ispmanUserId'] lengths[user_id]", "we don't need any sorting if len(userlist) <= 1: return lengths, userlist decorated", "$URL: http://ispmanccp.ufsoft.org/svn/branches/PythonPerl/ispmanccp/lib/ispman_helpers.py $ # $LastChangedDate: 2006-11-27 04:12:13 +0000 (Mon, 27 Nov 2006) $", "val in params_dict.iteritems(): if key in updatable_attributes: if isinstance(val, list): cgi_params += attrib_tpl", "= len(user_info['mailAlias']) if 'mailForwardingAddress' in user_info: lengths[uid]['forwards'] = len(user_info['mailForwardingAddress']) user_info['mailQuota'] = int(user_info['mailQuota'])/1024 return", "# Copyright (C) 2006 Ufsoft.org - <NAME> <<EMAIL>> # # Please view LICENSE", "else: results = to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter) ) entries = [] if not results:", "user: lengths[user_id]['forwards'] = len(user['mailForwardingAddress']) if letter == 'All' or user_id.upper().startswith(letter): userlist.append(user) # let's", "in user_info: lengths[uid]['aliases'] = len(user_info['mailAlias']) if 'mailForwardingAddress' in user_info: lengths[uid]['forwards'] = len(user_info['mailForwardingAddress']) user_info['mailQuota']", "< 1: attr_list.append('ispmanUserId') userlist = to_unicode(g.ispman.getUsers(domain, attr_list)) decorated = [(dict_['ispmanUserId'], dict_) for dict_", "'mailForwardingAddress', 'givenName') updatable_attributes = ( 'ispmanStatus', 'mailQuota', 'mailAlias', 'sn', 'userPassword', 'givenName', 'updateUser', 'uid',", "== address: return user[\"ispmanUserId\"] return None def get_users_list(domain, letter, sortby=None, sort_ascending=True): domain_users =", "for key, val in params_dict.iteritems(): if key in updatable_attributes: if isinstance(val, list): cgi_params", "variable_decode(params_dict) cgi_params = \"$q = new CGI({\" for key, val in params_dict.iteritems(): if", "'givenName', 'updateUser', 'uid', 'mailForwardingAddress', 'ispmanDomain', 'FTPQuotaMBytes', 'FTPStatus', 'mailHost', 
'fileHost', 'dialupAccess', 'radiusProfileDN' ) def", "# cache it for 5 minutes @beaker_cache(expire=300, query_args=True) def get_domain_info(domain): return to_unicode(dict( g.ispman.getDomainInfo(domain,", "g.ispman.getEntriesAsHashRef(base, ldap_filter, attrs, scope) ) else: results = to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter) ) entries", "n in range(len(val)): if val[n] == address: return user[\"ispmanUserId\"] elif val == address:", "g.ispman.addUser(cgi) def ldap_search(ldap_filter=\"objectClass=*\", attrs=None, scope=\"sub\", sort='ispmanUserId', ascending=True): base = APP_CONF['ispman_ldap_base_dn'] if attrs is", "updatable_attributes: if isinstance(val, list): cgi_params += attrib_tpl % ( {'key': key, 'val': join(val)}", ") entries = [] if not results: return None for dn in results:", "to get the `attr_list` from all users on `domain`\"\"\" if attr_list.count('ispmanUserId') < 1:", "\"mailAlias\", \"mailLocalAddress\", #\"mailForwardingAddress\" ] ) for user in users: for key, val, in", "if 'mailAlias' in user: lengths[user_id]['aliases'] = len(user['mailAlias']) if 'mailForwardingAddress' in user: lengths[user_id]['forwards'] =", "None: results = to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter, attrs, scope) ) else: results = to_unicode(", "dict_) in decorated] return result def address_exists_on_domain(domain, address): users = get_domain_users( domain, [", "if 'mailAlias' in user_info: lengths[uid]['aliases'] = len(user_info['mailAlias']) if 'mailForwardingAddress' in user_info: lengths[uid]['forwards'] =", "to_unicode( g.ispman.getConf('defaultUserFtpQuota') ) defaults['defaultUserMailQuota'] = to_unicode( g.ispman.getConf('defaultUserMailQuota') ) return defaults @perlexcept def add_user(attrib_dict):", "if key in updatable_attributes: if isinstance(val, list): cgi_params += attrib_tpl % ( {'key':", "'ispmanStatus', 'ispmanCreateTimestamp', 'ispmanUserId', 'ispmanDomain', 'DestinationAddress', 'DestinationPort', 
'mailQuota', 'mailHost', 'fileHost', 'cn', 'mailRoutingAddress', 'FTPStatus', 'FTPQuotaMBytes',", "is not None: results = to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter, attrs, scope) ) else: results", "] ) for user in users: for key, val, in user.iteritems(): if isinstance(val,", "not ascending: decorated.reverse() result = [dict_ for (key, dict_) in decorated] return result", "attr_list): #attributes_to_retrieve): \"\"\"Function to get the `attr_list` from all users on `domain`\"\"\" if", "# $Rev: 84 $ # $LastChangedBy: s0undt3ch $ # ============================================================================= # Copyright (C)", "address): users = get_domain_users( domain, [ \"ispmanUserId\", \"mailAlias\", \"mailLocalAddress\", #\"mailForwardingAddress\" ] ) for", "get_domain_info(domain): return to_unicode(dict( g.ispman.getDomainInfo(domain, 2)) ) def get_domain_vhost_count(domain): return to_unicode(g.ispman.getVhostCount(domain)) def get_domain_user_count(domain): return", "ldap_filter) ) entries = [] if not results: return None for dn in", "== address: return user[\"ispmanUserId\"] elif val == address: return user[\"ispmanUserId\"] return None def", "it for 5 minutes @beaker_cache(expire=300, query_args=True) def get_domain_info(domain): return to_unicode(dict( g.ispman.getDomainInfo(domain, 2)) )", "address_exists_on_domain(domain, address): users = get_domain_users( domain, [ \"ispmanUserId\", \"mailAlias\", \"mailLocalAddress\", #\"mailForwardingAddress\" ] )", "( 'dn', 'dialupAccess', 'radiusProfileDn', 'uid', 'uidNumber', 'gidNumber', 'homeDirectory', 'loginShell', 'ispmanStatus', 'ispmanCreateTimestamp', 'ispmanUserId', 'ispmanDomain',", "def add_user(attrib_dict): cgi = get_perl_cgi(attrib_dict) return g.ispman.addUser(cgi) def ldap_search(ldap_filter=\"objectClass=*\", attrs=None, scope=\"sub\", sort='ispmanUserId', ascending=True):", "ldap_search(ldap_filter=\"objectClass=*\", attrs=None, scope=\"sub\", sort='ispmanUserId', 
ascending=True): base = APP_CONF['ispman_ldap_base_dn'] if attrs is not None:", "user_id = user['ispmanUserId'] lengths[user_id] = {} # Aparently Genshi converts what it can", "'mailAlias' in user_info: lengths[uid]['aliases'] = len(user_info['mailAlias']) if 'mailForwardingAddress' in user_info: lengths[uid]['forwards'] = len(user_info['mailForwardingAddress'])", ") @perlexcept def delete_user(post_dict): cgi = get_perl_cgi(post_dict) return asbool(g.ispman.deleteUser(cgi)) def user_exists(user_id): uid =", "to_unicode( g.ispman.getConf('defaultUserMailQuota') ) return defaults @perlexcept def add_user(attrib_dict): cgi = get_perl_cgi(attrib_dict) return g.ispman.addUser(cgi)", "def get_default_acount_vars(): defaults = {} defaults['defaultUserFtpQuota'] = to_unicode( g.ispman.getConf('defaultUserFtpQuota') ) defaults['defaultUserMailQuota'] = to_unicode(", "= g.pylons_config.app_conf ispman_cache = cache.get_cache('ispman') allowed_user_attributes = ( 'dn', 'dialupAccess', 'radiusProfileDn', 'uid', 'uidNumber',", "(key, dict_) in decorated] return result def address_exists_on_domain(domain, address): users = get_domain_users( domain,", "ts=4 fenc=utf-8 # ============================================================================= # $Id: ispman_helpers.py 84 2006-11-27 04:12:13Z s0undt3ch $ #", "= len(user['mailForwardingAddress']) if letter == 'All' or user_id.upper().startswith(letter): userlist.append(user) # let's save some", "from formencode.variabledecode import variable_decode from pylons import request, g, cache from pylons.decorators.cache import", ") defaults['defaultUserMailQuota'] = to_unicode( g.ispman.getConf('defaultUserMailQuota') ) return defaults @perlexcept def add_user(attrib_dict): cgi =", "from ispmanccp.lib.decorators import perlexcept APP_CONF = g.pylons_config.app_conf ispman_cache = cache.get_cache('ispman') allowed_user_attributes = (", "the `attr_list` from all users on `domain`\"\"\" if attr_list.count('ispmanUserId') < 1: 
attr_list.append('ispmanUserId') userlist", "attrs, scope) ) else: results = to_unicode( g.ispman.getEntriesAsHashRef(base, ldap_filter) ) entries = []", "don't need any sorting if len(userlist) <= 1: return lengths, userlist decorated =", "user_info def get_perl_cgi(params_dict): attrib_tpl = \"\"\" '%(key)s' => ['%(val)s'], \"\"\" params_dict = variable_decode(params_dict)", "params_dict = variable_decode(params_dict) cgi_params = \"$q = new CGI({\" for key, val in", "g.pylons_config.app_conf ispman_cache = cache.get_cache('ispman') allowed_user_attributes = ( 'dn', 'dialupAccess', 'radiusProfileDn', 'uid', 'uidNumber', 'gidNumber',", "+= attrib_tpl % ( {'key': key, 'val': join(val)} ) else: cgi_params += attrib_tpl", "new CGI({\" for key, val in params_dict.iteritems(): if key in updatable_attributes: if isinstance(val,", "<NAME> <<EMAIL>> # # Please view LICENSE for additional licensing information. # =============================================================================", "pylons import request, g, cache from pylons.decorators.cache import beaker_cache from ispmanccp.lib.helpers import to_unicode,", "`domain`\"\"\" if attr_list.count('ispmanUserId') < 1: attr_list.append('ispmanUserId') userlist = to_unicode(g.ispman.getUsers(domain, attr_list)) decorated = [(dict_['ispmanUserId'],", "get the `attr_list` from all users on `domain`\"\"\" if attr_list.count('ispmanUserId') < 1: attr_list.append('ispmanUserId')", "$Rev: 84 $ # $LastChangedBy: s0undt3ch $ # ============================================================================= # Copyright (C) 2006", "= dn entries.append(vals) if len(entries) <= 1: return entries decorated = [(dict_[sort], dict_)", "lengths[user_id]['aliases'] = len(user['mailAlias']) if 'mailForwardingAddress' in user: lengths[user_id]['forwards'] = len(user['mailForwardingAddress']) if letter ==", "if len(userlist) <= 1: return lengths, userlist decorated = [(dict_[sortby], dict_) for dict_", "= user_id + '@' + 
request.POST['ispmanDomain'] return bool(int(g.ispman.userExists(uid))) # cache it for 5", "return user[\"ispmanUserId\"] elif val == address: return user[\"ispmanUserId\"] return None def get_users_list(domain, letter,", ") return defaults @perlexcept def add_user(attrib_dict): cgi = get_perl_cgi(attrib_dict) return g.ispman.addUser(cgi) def ldap_search(ldap_filter=\"objectClass=*\",", "1 hour @beaker_cache(expire=3600, query_args=True) def get_default_acount_vars(): defaults = {} defaults['defaultUserFtpQuota'] = to_unicode( g.ispman.getConf('defaultUserFtpQuota')", "len(entries) <= 1: return entries decorated = [(dict_[sort], dict_) for dict_ in entries]", "= len(user['mailAlias']) if 'mailForwardingAddress' in user: lengths[user_id]['forwards'] = len(user['mailForwardingAddress']) if letter == 'All'", "attribute): return to_unicode( g.ispman.getUserAttributeValues(id, domain, attribute) ) @perlexcept def delete_user(post_dict): cgi = get_perl_cgi(post_dict)", "g.perl.eval('$q->header(-charset => \"UTF-8\");') return cgi @perlexcept def update_user_info(attrib_dict): cgi = get_perl_cgi(attrib_dict) return asbool(g.ispman.update_user(cgi))", "import request, g, cache from pylons.decorators.cache import beaker_cache from ispmanccp.lib.helpers import to_unicode, asbool", "user in domain_users: user_id = user['ispmanUserId'] lengths[user_id] = {} # Aparently Genshi converts", "entries] decorated.sort() if not ascending: decorated.reverse() result = [dict_ for (key, dict_) in", "'mailQuota', 'mailAlias', 'sn', 'userPassword', 'givenName', 'updateUser', 'uid', 'mailForwardingAddress', 'ispmanDomain', 'FTPQuotaMBytes', 'FTPStatus', 'mailHost', 'fileHost',", "\"userPassword\", \"mailQuota\", \"mailAlias\", \"FTPQuotaMBytes\", \"FTPStatus\" ] ) userlist = [] lengths = {}", "for (key, dict_) in decorated] return lengths, result def get_user_info(uid, domain): user_info =", "allowed_user_attributes = ( 'dn', 'dialupAccess', 'radiusProfileDn', 'uid', 'uidNumber', 
'gidNumber', 'homeDirectory', 'loginShell', 'ispmanStatus', 'ispmanCreateTimestamp',", "\"sn\", \"cn\", \"ispmanCreateTimestamp\", \"ispmanUserId\", \"mailLocalAddress\", \"mailForwardingAddress\", \"userPassword\", \"mailQuota\", \"mailAlias\", \"FTPQuotaMBytes\", \"FTPStatus\" ] )", "'@' + domain, domain)) lengths = {} lengths[uid] = {} if 'mailAlias' in", "dict_) for dict_ in userlist] decorated.sort() if not sort_ascending: decorated.reverse() result = [dict_", "import to_unicode, asbool from ispmanccp.lib.decorators import perlexcept APP_CONF = g.pylons_config.app_conf ispman_cache = cache.get_cache('ispman')", "= g.perl.eval(cgi_params) g.perl.eval('$q->header(-charset => \"UTF-8\");') return cgi @perlexcept def update_user_info(attrib_dict): cgi = get_perl_cgi(attrib_dict)", "bool(int(g.ispman.userExists(uid))) # cache it for 5 minutes @beaker_cache(expire=300, query_args=True) def get_domain_info(domain): return to_unicode(dict(", "cgi = get_perl_cgi(attrib_dict) return asbool(g.ispman.update_user(cgi)) def get_user_attribute_values(id, domain, attribute): return to_unicode( g.ispman.getUserAttributeValues(id, domain,", "% ( {'key': key, 'val': join(val)} ) else: cgi_params += attrib_tpl % (", "defaults @perlexcept def add_user(attrib_dict): cgi = get_perl_cgi(attrib_dict) return g.ispman.addUser(cgi) def ldap_search(ldap_filter=\"objectClass=*\", attrs=None, scope=\"sub\",", "results[dn] vals['dn'] = dn entries.append(vals) if len(entries) <= 1: return entries decorated =", "get_perl_cgi(params_dict): attrib_tpl = \"\"\" '%(key)s' => ['%(val)s'], \"\"\" params_dict = variable_decode(params_dict) cgi_params =", "defaults = {} defaults['defaultUserFtpQuota'] = to_unicode( g.ispman.getConf('defaultUserFtpQuota') ) defaults['defaultUserMailQuota'] = to_unicode( g.ispman.getConf('defaultUserMailQuota') )", "result = [dict_ for (key, dict_) in decorated] return result def address_exists_on_domain(domain, address):", "'FTPStatus', 'FTPQuotaMBytes', 'mailAlias', 
'sn', 'mailLocalAddress', 'userPassword', 'mailForwardingAddress', 'givenName') updatable_attributes = ( 'ispmanStatus', 'mailQuota',", "g.perl.eval(cgi_params) g.perl.eval('$q->header(-charset => \"UTF-8\");') return cgi @perlexcept def update_user_info(attrib_dict): cgi = get_perl_cgi(attrib_dict) return", "[(dict_['ispmanUserId'], dict_) for dict_ in userlist.values()] decorated.sort() result = [dict_ for (key, dict_)", "attr_list.append('ispmanUserId') userlist = to_unicode(g.ispman.getUsers(domain, attr_list)) decorated = [(dict_['ispmanUserId'], dict_) for dict_ in userlist.values()]", "lengths[user_id] = {} # Aparently Genshi converts what it can to strings, #", "cgi_params = \"$q = new CGI({\" for key, val in params_dict.iteritems(): if key", "'FTPQuotaMBytes', 'FTPStatus', 'mailHost', 'fileHost', 'dialupAccess', 'radiusProfileDN' ) def get_cache(domain): return cache.get_cache(domain) def get_domain_users(domain,", "perlexcept APP_CONF = g.pylons_config.app_conf ispman_cache = cache.get_cache('ispman') allowed_user_attributes = ( 'dn', 'dialupAccess', 'radiusProfileDn',", "user.iteritems(): if isinstance(val, list): for n in range(len(val)): if val[n] == address: return", "import beaker_cache from ispmanccp.lib.helpers import to_unicode, asbool from ispmanccp.lib.decorators import perlexcept APP_CONF =", "to_unicode, asbool from ispmanccp.lib.decorators import perlexcept APP_CONF = g.pylons_config.app_conf ispman_cache = cache.get_cache('ispman') allowed_user_attributes", "len(user['mailForwardingAddress']) if letter == 'All' or user_id.upper().startswith(letter): userlist.append(user) # let's save some time", "user[\"ispmanUserId\"] return None def get_users_list(domain, letter, sortby=None, sort_ascending=True): domain_users = get_domain_users( domain, [", "'%(key)s' => ['%(val)s'], \"\"\" params_dict = variable_decode(params_dict) cgi_params = \"$q = new CGI({\"", "dict_ in userlist.values()] decorated.sort() result = [dict_ for (key, dict_) in 
decorated] return", "= user['ispmanUserId'] lengths[user_id] = {} # Aparently Genshi converts what it can to", "'All' or user_id.upper().startswith(letter): userlist.append(user) # let's save some time and return right away", "string import join from formencode.variabledecode import variable_decode from pylons import request, g, cache", "get_domain_user_count(domain): return to_unicode(g.ispman.getUserCount(domain)) # cache it for 1 hour @beaker_cache(expire=3600, query_args=True) def get_default_acount_vars():", "domain, [ \"dn\", \"givenName\", \"sn\", \"cn\", \"ispmanCreateTimestamp\", \"ispmanUserId\", \"mailLocalAddress\", \"mailForwardingAddress\", \"userPassword\", \"mailQuota\", \"mailAlias\",", "'uidNumber', 'gidNumber', 'homeDirectory', 'loginShell', 'ispmanStatus', 'ispmanCreateTimestamp', 'ispmanUserId', 'ispmanDomain', 'DestinationAddress', 'DestinationPort', 'mailQuota', 'mailHost', 'fileHost',", "users on `domain`\"\"\" if attr_list.count('ispmanUserId') < 1: attr_list.append('ispmanUserId') userlist = to_unicode(g.ispman.getUsers(domain, attr_list)) decorated", "in range(len(val)): if val[n] == address: return user[\"ispmanUserId\"] elif val == address: return" ]
[ "else: s = Signer(app.secret_key) headers['WWW-Authenticate'] = 'Nayookie login_url=' + \\ urlparse.urljoin(request.url_root, URI_BEGINNING_PATH['authorization']) +", "mimetypes app = Flask(__name__.split('.')[0]) app.config.from_object(__name__) BUFFER_SIZE = 128000 URI_BEGINNING_PATH = { 'authorization': '/login/',", "headers = g.headers status = 501 return make_response('', status, headers) def put(self, pathname):", "= ', '.join(['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS']) response = make_response('', 200, headers)", "response if __name__ == '__main__': app.fs_path = '/app/' app.fs_handler = utils.FilesystemHandler(app.fs_path, URI_BEGINNING_PATH['weeb']) generate_key()", "= request.headers.get('Origin') useragent = request.headers.get('User-Agent') return '%s %s' % (str(origin), str(useragent)) def verify_cookie(cookey):", "ValueError: length = 0 if not request_data and length: try: request_data = request.form.items()[0][0]", "status = 501 return make_response('', status, headers) def copy(self, pathname): status = g.status", "response = make_response('', 401, headers) return response g.status = status_code g.headers = headers", "'/login/', 'weeb': '/weeb/wtf/', } def generate_key(): app.secret_key = os.urandom(24) def generate_cookie_info(origin=None): if not", "def get(self, pathname): status = g.status headers = g.headers status = 501 return", "headers) return response g.status = status_code g.headers = headers class weeb(MethodView): methods =", "info = generate_cookie_info(origin=origin) response.set_cookie(key, value=s.get_signature(info), max_age=None, expires=None, path='/', domain=None, secure=True, httponly=True) else: return", "'OPTIONS' and specific_header: headers['Access-Control-Request-Headers'] = specific_header headers['Access-Control-Allow-Methods'] = ', '.join(['GET', 'PUT', 'PROPFIND', 'DELETE','COPY',", "weeb.as_view('dav') app.add_url_rule( '/weeb/wtf/', defaults={'pathname': ''}, view_func=weeb_view ) 
app.add_url_rule( URI_BEGINNING_PATH['weeb'] + '<path:pathname>', view_func=weeb_view )", "+ '\\n', status, headers) except IOError, e: response = make_response('Not found', 404, headers)", "= int(request.headers.get('Content-length')) except ValueError: length = 0 if not request_data and length: try:", "str(useragent)) def verify_cookie(cookey): is_correct = False cookie_value = request.cookies.get(cookey) if cookie_value: s =", "path='/', domain=None, secure=True, httponly=True) else: return 'Something went wrong...' response.status = '301' #", "Last-Modified, WWW-Authenticate' origin = request.headers.get('Origin') headers['Access-Control-Allow-Origin'] = origin specific_header = request.headers.get('Access-Control-Request-Headers') if is_authorized():", "request.cookies.get(cookey) if cookie_value: s = Signer(app.secret_key) expected_cookie_content = \\ generate_cookie_info(base64_decode(cookey)) expected_cookie_content = s.get_signature(expected_cookie_content)", "request, render_template, make_response, g, Response from flask.views import MethodView import urlparse import shutil", "'%s %s' % (str(origin), str(useragent)) def verify_cookie(cookey): is_correct = False cookie_value = request.cookies.get(cookey)", "% (str(origin), str(useragent)) def verify_cookie(cookey): is_correct = False cookie_value = request.cookies.get(cookey) if cookie_value:", "+ \\ 'Content-Type, Authorization, Depth, If-Modified-Since, '+ \\ 'If-None-Match' headers['Access-Control-Expose-Headers'] = \\ 'Content-Type,", "= make_response('Not found', 404, headers) return response def delete(self, pathname): status = g.status", "verify_cookie(base64_encode(origin)) @app.before_request def before_request(): headers = {} headers['Access-Control-Max-Age'] = '3600' headers['Access-Control-Allow-Credentials'] = 'true'", "= Signer(app.secret_key) expected_cookie_content = \\ generate_cookie_info(base64_decode(cookey)) expected_cookie_content = s.get_signature(expected_cookie_content) if 
expected_cookie_content == cookie_value:", "True return is_correct def is_authorized(): origin = request.headers.get('Origin') if origin is None: return", "origin is None: return True return verify_cookie(base64_encode(origin)) @app.before_request def before_request(): headers = {}", "'MOVE', 'OPTIONS'] def __init__(self): self.baseuri = URI_BEGINNING_PATH['weeb'] def get_body(self): request_data = request.data try:", "import Flask, request, render_template, make_response, g, Response from flask.views import MethodView import urlparse", "length = 0 if not request_data and length: try: request_data = request.form.items()[0][0] except", "Accept-Encoding, Content-Length, ' + \\ 'Content-Type, Authorization, Depth, If-Modified-Since, '+ \\ 'If-None-Match' headers['Access-Control-Expose-Headers']", "length: try: request_data = request.form.items()[0][0] except IndexError: request_data = None return request_data def", "import Signer, base64_encode, base64_decode from flask import Flask, request, render_template, make_response, g, Response", "def verify_cookie(cookey): is_correct = False cookie_value = request.cookies.get(cookey) if cookie_value: s = Signer(app.secret_key)", "cookey in request.cookies.keys() if verify_cookie(cookey) ], origin=request.args.get('origin'), back_url=request.args.get('back_url'))) return response if __name__ ==", "response = make_response(pf.create_response() + '\\n', status, headers) except IOError, e: response = make_response('Not", "== request.args.get('sig'): key = base64_encode(str(origin)) back = request.args.get('back_url') info = generate_cookie_info(origin=origin) response.set_cookie(key, value=s.get_signature(info),", "make_response('', status, headers) def propfind(self, pathname): status = g.status headers = g.headers pf", "base64_encode(str(origin)) back = request.args.get('back_url') info = generate_cookie_info(origin=origin) response.set_cookie(key, value=s.get_signature(info), max_age=None, expires=None, path='/', 
domain=None,", "response = make_response(render_template('authorization_page.html', cookie_list=[ base64_decode(cookey) for cookey in request.cookies.keys() if verify_cookie(cookey) ], origin=request.args.get('origin'),", "== 'POST': response = make_response() if request.form.get('continue') != 'true': generate_key() s = Signer(app.secret_key)", "e: response = make_response('Not found', 404, headers) return response def delete(self, pathname): status", "'weeb': '/weeb/wtf/', } def generate_key(): app.secret_key = os.urandom(24) def generate_cookie_info(origin=None): if not origin:", "'DELETE','COPY', 'MOVE', 'OPTIONS'] def __init__(self): self.baseuri = URI_BEGINNING_PATH['weeb'] def get_body(self): request_data = request.data", "True return verify_cookie(base64_encode(origin)) @app.before_request def before_request(): headers = {} headers['Access-Control-Max-Age'] = '3600' headers['Access-Control-Allow-Credentials']", "'DELETE','COPY', 'MOVE', 'OPTIONS']) response = make_response('', 200, headers) return response else: s =", "= headers class weeb(MethodView): methods = ['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS'] def", "\\ 'Content-Type, Authorization, Depth, If-Modified-Since, '+ \\ 'If-None-Match' headers['Access-Control-Expose-Headers'] = \\ 'Content-Type, Last-Modified,", "headers) def options(self, pathname): return make_response('', g.status, g.headers) weeb_view = weeb.as_view('dav') app.add_url_rule( '/weeb/wtf/',", "make_response(pf.create_response() + '\\n', status, headers) except IOError, e: response = make_response('Not found', 404,", "def delete(self, pathname): status = g.status headers = g.headers status = 501 return", "expected_cookie_content = s.get_signature(expected_cookie_content) if expected_cookie_content == cookie_value: is_correct = True return is_correct def", "g.headers status = 501 return make_response('', status, headers) def propfind(self, pathname): status =", "flask import Flask, request, render_template, 
make_response, g, Response from flask.views import MethodView import", "= \\ 'Content-Type, Last-Modified, WWW-Authenticate' origin = request.headers.get('Origin') headers['Access-Control-Allow-Origin'] = origin specific_header =", "specific_header = request.headers.get('Access-Control-Request-Headers') if is_authorized(): status_code = 200 elif request.method == 'OPTIONS' and", "request_data = request.data try: length = int(request.headers.get('Content-length')) except ValueError: length = 0 if", "status = g.status headers = g.headers pf = utils.PropfindProcessor( URI_BEGINNING_PATH['weeb'] + pathname, app.fs_handler,", "response.set_cookie(key, value=s.get_signature(info), max_age=None, expires=None, path='/', domain=None, secure=True, httponly=True) else: return 'Something went wrong...'", "headers) def move(self, pathname): status = g.status headers = g.headers status = 501", "= Flask(__name__.split('.')[0]) app.config.from_object(__name__) BUFFER_SIZE = 128000 URI_BEGINNING_PATH = { 'authorization': '/login/', 'weeb': '/weeb/wtf/',", "is None: return True return verify_cookie(base64_encode(origin)) @app.before_request def before_request(): headers = {} headers['Access-Control-Max-Age']", "useragent = request.headers.get('User-Agent') return '%s %s' % (str(origin), str(useragent)) def verify_cookie(cookey): is_correct =", "= make_response('', 401, headers) return response g.status = status_code g.headers = headers class", "make_response() if request.form.get('continue') != 'true': generate_key() s = Signer(app.secret_key) if s.get_signature(origin) == request.args.get('sig'):", "= generate_cookie_info(origin=origin) response.set_cookie(key, value=s.get_signature(info), max_age=None, expires=None, path='/', domain=None, secure=True, httponly=True) else: return 'Something", "128000 URI_BEGINNING_PATH = { 'authorization': '/login/', 'weeb': '/weeb/wtf/', } def generate_key(): app.secret_key =", "is_correct = False cookie_value = request.cookies.get(cookey) if 
cookie_value: s = Signer(app.secret_key) expected_cookie_content =", "if not back else back else: response = make_response(render_template('authorization_page.html', cookie_list=[ base64_decode(cookey) for cookey", "501 return make_response('', status, headers) def copy(self, pathname): status = g.status headers =", "+ \\ s.get_signature(origin) + '{&back_url,origin}' response = make_response('', 401, headers) return response g.status", "make_response('', status, headers) def copy(self, pathname): status = g.status headers = g.headers status", "g.status headers = g.headers pf = utils.PropfindProcessor( URI_BEGINNING_PATH['weeb'] + pathname, app.fs_handler, request.headers.get('Depth', 'infinity'),", "'/' if not back else back else: response = make_response(render_template('authorization_page.html', cookie_list=[ base64_decode(cookey) for", "s.get_signature(expected_cookie_content) if expected_cookie_content == cookie_value: is_correct = True return is_correct def is_authorized(): origin", "for cookey in request.cookies.keys() if verify_cookie(cookey) ], origin=request.args.get('origin'), back_url=request.args.get('back_url'))) return response if __name__", "from flask import Flask, request, render_template, make_response, g, Response from flask.views import MethodView", "went wrong...' 
response.status = '301' # response.headers['Location'] = '/' if not back else", "'POST']) def authorize(): origin = request.args.get('origin') if request.method == 'POST': response = make_response()", "Signer(app.secret_key) expected_cookie_content = \\ generate_cookie_info(base64_decode(cookey)) expected_cookie_content = s.get_signature(expected_cookie_content) if expected_cookie_content == cookie_value: is_correct", "headers) return response else: s = Signer(app.secret_key) headers['WWW-Authenticate'] = 'Nayookie login_url=' + \\", "except ValueError: length = 0 if not request_data and length: try: request_data =", "shutil import utils import os import mimetypes app = Flask(__name__.split('.')[0]) app.config.from_object(__name__) BUFFER_SIZE =", "+ \\ urlparse.urljoin(request.url_root, URI_BEGINNING_PATH['authorization']) + '?sig=' + \\ s.get_signature(origin) + '{&back_url,origin}' response =", "'<path:pathname>', view_func=weeb_view ) @app.route(URI_BEGINNING_PATH['authorization'], methods=['GET', 'POST']) def authorize(): origin = request.args.get('origin') if request.method", "pathname): status = g.status headers = g.headers pf = utils.PropfindProcessor( URI_BEGINNING_PATH['weeb'] + pathname,", "'infinity'), self.get_body()) try: response = make_response(pf.create_response() + '\\n', status, headers) except IOError, e:", "g.status headers = g.headers status = 501 return make_response('', status, headers) def options(self,", "{} headers['Access-Control-Max-Age'] = '3600' headers['Access-Control-Allow-Credentials'] = 'true' headers['Access-Control-Allow-Headers'] = \\ 'Origin, Accept, Accept-Encoding,", "request.data try: length = int(request.headers.get('Content-length')) except ValueError: length = 0 if not request_data", "make_response('', status, headers) def move(self, pathname): status = g.status headers = g.headers status", "generate_key(): app.secret_key = os.urandom(24) def generate_cookie_info(origin=None): if not origin: origin = 
request.headers.get('Origin') useragent", "headers) def put(self, pathname): status = g.status headers = g.headers status = 501", "status_code = 200 elif request.method == 'OPTIONS' and specific_header: headers['Access-Control-Request-Headers'] = specific_header headers['Access-Control-Allow-Methods']", "def options(self, pathname): return make_response('', g.status, g.headers) weeb_view = weeb.as_view('dav') app.add_url_rule( '/weeb/wtf/', defaults={'pathname':", "base64_decode from flask import Flask, request, render_template, make_response, g, Response from flask.views import", "= make_response(pf.create_response() + '\\n', status, headers) except IOError, e: response = make_response('Not found',", "request.headers.get('Origin') if origin is None: return True return verify_cookie(base64_encode(origin)) @app.before_request def before_request(): headers", "origin specific_header = request.headers.get('Access-Control-Request-Headers') if is_authorized(): status_code = 200 elif request.method == 'OPTIONS'", "response.status = '301' # response.headers['Location'] = '/' if not back else back else:", "headers = {} headers['Access-Control-Max-Age'] = '3600' headers['Access-Control-Allow-Credentials'] = 'true' headers['Access-Control-Allow-Headers'] = \\ 'Origin,", "= origin specific_header = request.headers.get('Access-Control-Request-Headers') if is_authorized(): status_code = 200 elif request.method ==", "def generate_key(): app.secret_key = os.urandom(24) def generate_cookie_info(origin=None): if not origin: origin = request.headers.get('Origin')", "\\ 'Origin, Accept, Accept-Encoding, Content-Length, ' + \\ 'Content-Type, Authorization, Depth, If-Modified-Since, '+", "cookie_value: is_correct = True return is_correct def is_authorized(): origin = request.headers.get('Origin') if origin", "def get_body(self): request_data = request.data try: length = int(request.headers.get('Content-length')) except ValueError: length =", "make_response, g, Response from flask.views 
import MethodView import urlparse import shutil import utils", "headers = g.headers status = 501 return make_response('', status, headers) def propfind(self, pathname):", "os.urandom(24) def generate_cookie_info(origin=None): if not origin: origin = request.headers.get('Origin') useragent = request.headers.get('User-Agent') return", "status, headers) def propfind(self, pathname): status = g.status headers = g.headers pf =", "propfind(self, pathname): status = g.status headers = g.headers pf = utils.PropfindProcessor( URI_BEGINNING_PATH['weeb'] +", "import urlparse import shutil import utils import os import mimetypes app = Flask(__name__.split('.')[0])", "status, headers) def options(self, pathname): return make_response('', g.status, g.headers) weeb_view = weeb.as_view('dav') app.add_url_rule(", "= request.headers.get('User-Agent') return '%s %s' % (str(origin), str(useragent)) def verify_cookie(cookey): is_correct = False", "WWW-Authenticate' origin = request.headers.get('Origin') headers['Access-Control-Allow-Origin'] = origin specific_header = request.headers.get('Access-Control-Request-Headers') if is_authorized(): status_code", "'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS'] def __init__(self): self.baseuri = URI_BEGINNING_PATH['weeb'] def get_body(self): request_data =", "# response.headers['Location'] = '/' if not back else back else: response = make_response(render_template('authorization_page.html',", "= { 'authorization': '/login/', 'weeb': '/weeb/wtf/', } def generate_key(): app.secret_key = os.urandom(24) def", "\\ 'Content-Type, Last-Modified, WWW-Authenticate' origin = request.headers.get('Origin') headers['Access-Control-Allow-Origin'] = origin specific_header = request.headers.get('Access-Control-Request-Headers')", "'true': generate_key() s = Signer(app.secret_key) if s.get_signature(origin) == request.args.get('sig'): key = base64_encode(str(origin)) back", "base64_decode(cookey) for cookey in request.cookies.keys() if verify_cookie(cookey) ], 
origin=request.args.get('origin'), back_url=request.args.get('back_url'))) return response if", "Signer, base64_encode, base64_decode from flask import Flask, request, render_template, make_response, g, Response from", "= 128000 URI_BEGINNING_PATH = { 'authorization': '/login/', 'weeb': '/weeb/wtf/', } def generate_key(): app.secret_key", "not origin: origin = request.headers.get('Origin') useragent = request.headers.get('User-Agent') return '%s %s' % (str(origin),", "back_url=request.args.get('back_url'))) return response if __name__ == '__main__': app.fs_path = '/app/' app.fs_handler = utils.FilesystemHandler(app.fs_path,", "= request.headers.get('Origin') if origin is None: return True return verify_cookie(base64_encode(origin)) @app.before_request def before_request():", "g.status headers = g.headers status = 501 return make_response('', status, headers) def propfind(self,", "make_response('', status, headers) def put(self, pathname): status = g.status headers = g.headers status", "request.headers.get('Depth', 'infinity'), self.get_body()) try: response = make_response(pf.create_response() + '\\n', status, headers) except IOError,", "501 return make_response('', status, headers) def propfind(self, pathname): status = g.status headers =", "options(self, pathname): return make_response('', g.status, g.headers) weeb_view = weeb.as_view('dav') app.add_url_rule( '/weeb/wtf/', defaults={'pathname': ''},", "request.cookies.keys() if verify_cookie(cookey) ], origin=request.args.get('origin'), back_url=request.args.get('back_url'))) return response if __name__ == '__main__': app.fs_path", "= s.get_signature(expected_cookie_content) if expected_cookie_content == cookie_value: is_correct = True return is_correct def is_authorized():", "generate_cookie_info(origin=None): if not origin: origin = request.headers.get('Origin') useragent = request.headers.get('User-Agent') return '%s %s'", "= False cookie_value = request.cookies.get(cookey) if cookie_value: s = 
Signer(app.secret_key) expected_cookie_content = \\", "= g.headers status = 501 return make_response('', status, headers) def move(self, pathname): status", "} def generate_key(): app.secret_key = os.urandom(24) def generate_cookie_info(origin=None): if not origin: origin =", "= Signer(app.secret_key) headers['WWW-Authenticate'] = 'Nayookie login_url=' + \\ urlparse.urljoin(request.url_root, URI_BEGINNING_PATH['authorization']) + '?sig=' +", "return make_response('', g.status, g.headers) weeb_view = weeb.as_view('dav') app.add_url_rule( '/weeb/wtf/', defaults={'pathname': ''}, view_func=weeb_view )", "= make_response('', 200, headers) return response else: s = Signer(app.secret_key) headers['WWW-Authenticate'] = 'Nayookie", "put(self, pathname): status = g.status headers = g.headers status = 501 return make_response('',", "specific_header headers['Access-Control-Allow-Methods'] = ', '.join(['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS']) response = make_response('',", "== cookie_value: is_correct = True return is_correct def is_authorized(): origin = request.headers.get('Origin') if", "request_data = None return request_data def get(self, pathname): status = g.status headers =", "origin = request.headers.get('Origin') if origin is None: return True return verify_cookie(base64_encode(origin)) @app.before_request def", "app.config.from_object(__name__) BUFFER_SIZE = 128000 URI_BEGINNING_PATH = { 'authorization': '/login/', 'weeb': '/weeb/wtf/', } def", "= weeb.as_view('dav') app.add_url_rule( '/weeb/wtf/', defaults={'pathname': ''}, view_func=weeb_view ) app.add_url_rule( URI_BEGINNING_PATH['weeb'] + '<path:pathname>', view_func=weeb_view", "request.headers.get('User-Agent') return '%s %s' % (str(origin), str(useragent)) def verify_cookie(cookey): is_correct = False cookie_value", "'3600' headers['Access-Control-Allow-Credentials'] = 'true' headers['Access-Control-Allow-Headers'] = \\ 'Origin, Accept, Accept-Encoding, Content-Length, ' +", "g.headers 
status = 501 return make_response('', status, headers) def options(self, pathname): return make_response('',", "status, headers) def put(self, pathname): status = g.status headers = g.headers status =", "= 501 return make_response('', status, headers) def options(self, pathname): return make_response('', g.status, g.headers)", "', '.join(['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS']) response = make_response('', 200, headers) return", "make_response('Not found', 404, headers) return response def delete(self, pathname): status = g.status headers", "headers['Access-Control-Max-Age'] = '3600' headers['Access-Control-Allow-Credentials'] = 'true' headers['Access-Control-Allow-Headers'] = \\ 'Origin, Accept, Accept-Encoding, Content-Length,", "defaults={'pathname': ''}, view_func=weeb_view ) app.add_url_rule( URI_BEGINNING_PATH['weeb'] + '<path:pathname>', view_func=weeb_view ) @app.route(URI_BEGINNING_PATH['authorization'], methods=['GET', 'POST'])", "'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS']) response = make_response('', 200, headers) return response else:", "+ pathname, app.fs_handler, request.headers.get('Depth', 'infinity'), self.get_body()) try: response = make_response(pf.create_response() + '\\n', status,", "response.headers['Location'] = '/' if not back else back else: response = make_response(render_template('authorization_page.html', cookie_list=[", "httponly=True) else: return 'Something went wrong...' 
response.status = '301' # response.headers['Location'] = '/'", "status = 501 return make_response('', status, headers) def move(self, pathname): status = g.status", "Accept, Accept-Encoding, Content-Length, ' + \\ 'Content-Type, Authorization, Depth, If-Modified-Since, '+ \\ 'If-None-Match'", "request_data = request.form.items()[0][0] except IndexError: request_data = None return request_data def get(self, pathname):", "origin = request.headers.get('Origin') useragent = request.headers.get('User-Agent') return '%s %s' % (str(origin), str(useragent)) def", "= 501 return make_response('', status, headers) def copy(self, pathname): status = g.status headers", "methods=['GET', 'POST']) def authorize(): origin = request.args.get('origin') if request.method == 'POST': response =", "= '301' # response.headers['Location'] = '/' if not back else back else: response", "501 return make_response('', status, headers) def options(self, pathname): return make_response('', g.status, g.headers) weeb_view", "import os import mimetypes app = Flask(__name__.split('.')[0]) app.config.from_object(__name__) BUFFER_SIZE = 128000 URI_BEGINNING_PATH =", "headers) def copy(self, pathname): status = g.status headers = g.headers status = 501", "= request.data try: length = int(request.headers.get('Content-length')) except ValueError: length = 0 if not", "= request.headers.get('Origin') headers['Access-Control-Allow-Origin'] = origin specific_header = request.headers.get('Access-Control-Request-Headers') if is_authorized(): status_code = 200", "is_authorized(): status_code = 200 elif request.method == 'OPTIONS' and specific_header: headers['Access-Control-Request-Headers'] = specific_header", "404, headers) return response def delete(self, pathname): status = g.status headers = g.headers", "except IOError, e: response = make_response('Not found', 404, headers) return response def delete(self,", "origin = request.args.get('origin') if request.method == 'POST': response = make_response() if 
request.form.get('continue') !=", "try: request_data = request.form.items()[0][0] except IndexError: request_data = None return request_data def get(self,", "' + \\ 'Content-Type, Authorization, Depth, If-Modified-Since, '+ \\ 'If-None-Match' headers['Access-Control-Expose-Headers'] = \\", "return '%s %s' % (str(origin), str(useragent)) def verify_cookie(cookey): is_correct = False cookie_value =", "def propfind(self, pathname): status = g.status headers = g.headers pf = utils.PropfindProcessor( URI_BEGINNING_PATH['weeb']", "request.args.get('sig'): key = base64_encode(str(origin)) back = request.args.get('back_url') info = generate_cookie_info(origin=origin) response.set_cookie(key, value=s.get_signature(info), max_age=None,", "= ['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS'] def __init__(self): self.baseuri = URI_BEGINNING_PATH['weeb'] def", "headers) return response def delete(self, pathname): status = g.status headers = g.headers status", "= g.headers status = 501 return make_response('', status, headers) def put(self, pathname): status", "specific_header: headers['Access-Control-Request-Headers'] = specific_header headers['Access-Control-Allow-Methods'] = ', '.join(['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS'])", "urlparse.urljoin(request.url_root, URI_BEGINNING_PATH['authorization']) + '?sig=' + \\ s.get_signature(origin) + '{&back_url,origin}' response = make_response('', 401,", "found', 404, headers) return response def delete(self, pathname): status = g.status headers =", "def generate_cookie_info(origin=None): if not origin: origin = request.headers.get('Origin') useragent = request.headers.get('User-Agent') return '%s", "return make_response('', status, headers) def propfind(self, pathname): status = g.status headers = g.headers", "= {} headers['Access-Control-Max-Age'] = '3600' headers['Access-Control-Allow-Credentials'] = 'true' headers['Access-Control-Allow-Headers'] = \\ 'Origin, Accept,", "'/weeb/wtf/', } def 
generate_key(): app.secret_key = os.urandom(24) def generate_cookie_info(origin=None): if not origin: origin", "response else: s = Signer(app.secret_key) headers['WWW-Authenticate'] = 'Nayookie login_url=' + \\ urlparse.urljoin(request.url_root, URI_BEGINNING_PATH['authorization'])", "'POST': response = make_response() if request.form.get('continue') != 'true': generate_key() s = Signer(app.secret_key) if", "def copy(self, pathname): status = g.status headers = g.headers status = 501 return", "Authorization, Depth, If-Modified-Since, '+ \\ 'If-None-Match' headers['Access-Control-Expose-Headers'] = \\ 'Content-Type, Last-Modified, WWW-Authenticate' origin", "headers['Access-Control-Request-Headers'] = specific_header headers['Access-Control-Allow-Methods'] = ', '.join(['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS']) response", "self.baseuri = URI_BEGINNING_PATH['weeb'] def get_body(self): request_data = request.data try: length = int(request.headers.get('Content-length')) except", "generate_key() s = Signer(app.secret_key) if s.get_signature(origin) == request.args.get('sig'): key = base64_encode(str(origin)) back =", "cookie_value = request.cookies.get(cookey) if cookie_value: s = Signer(app.secret_key) expected_cookie_content = \\ generate_cookie_info(base64_decode(cookey)) expected_cookie_content", "request.method == 'OPTIONS' and specific_header: headers['Access-Control-Request-Headers'] = specific_header headers['Access-Control-Allow-Methods'] = ', '.join(['GET', 'PUT',", "s = Signer(app.secret_key) if s.get_signature(origin) == request.args.get('sig'): key = base64_encode(str(origin)) back = request.args.get('back_url')", "headers class weeb(MethodView): methods = ['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS'] def __init__(self):", "If-Modified-Since, '+ \\ 'If-None-Match' headers['Access-Control-Expose-Headers'] = \\ 'Content-Type, Last-Modified, WWW-Authenticate' origin = request.headers.get('Origin')", "= specific_header 
headers['Access-Control-Allow-Methods'] = ', '.join(['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS']) response =", "headers = g.headers status = 501 return make_response('', status, headers) def options(self, pathname):", "view_func=weeb_view ) app.add_url_rule( URI_BEGINNING_PATH['weeb'] + '<path:pathname>', view_func=weeb_view ) @app.route(URI_BEGINNING_PATH['authorization'], methods=['GET', 'POST']) def authorize():", "IndexError: request_data = None return request_data def get(self, pathname): status = g.status headers", "request.args.get('back_url') info = generate_cookie_info(origin=origin) response.set_cookie(key, value=s.get_signature(info), max_age=None, expires=None, path='/', domain=None, secure=True, httponly=True) else:", "flask.views import MethodView import urlparse import shutil import utils import os import mimetypes", "g.status headers = g.headers status = 501 return make_response('', status, headers) def move(self,", "status, headers) def move(self, pathname): status = g.status headers = g.headers status =", "s = Signer(app.secret_key) expected_cookie_content = \\ generate_cookie_info(base64_decode(cookey)) expected_cookie_content = s.get_signature(expected_cookie_content) if expected_cookie_content ==", "status, headers) except IOError, e: response = make_response('Not found', 404, headers) return response", "return make_response('', status, headers) def put(self, pathname): status = g.status headers = g.headers", "headers = g.headers status = 501 return make_response('', status, headers) def move(self, pathname):", "+ '?sig=' + \\ s.get_signature(origin) + '{&back_url,origin}' response = make_response('', 401, headers) return", "if __name__ == '__main__': app.fs_path = '/app/' app.fs_handler = utils.FilesystemHandler(app.fs_path, URI_BEGINNING_PATH['weeb']) generate_key() app.run(host=\"0.0.0.0\")", "utils.PropfindProcessor( URI_BEGINNING_PATH['weeb'] + pathname, app.fs_handler, request.headers.get('Depth', 'infinity'), 
self.get_body()) try: response = make_response(pf.create_response() +", "= 'true' headers['Access-Control-Allow-Headers'] = \\ 'Origin, Accept, Accept-Encoding, Content-Length, ' + \\ 'Content-Type,", "base64_encode, base64_decode from flask import Flask, request, render_template, make_response, g, Response from flask.views", "= \\ generate_cookie_info(base64_decode(cookey)) expected_cookie_content = s.get_signature(expected_cookie_content) if expected_cookie_content == cookie_value: is_correct = True", "g.status = status_code g.headers = headers class weeb(MethodView): methods = ['GET', 'PUT', 'PROPFIND',", "= 0 if not request_data and length: try: request_data = request.form.items()[0][0] except IndexError:", "'+ \\ 'If-None-Match' headers['Access-Control-Expose-Headers'] = \\ 'Content-Type, Last-Modified, WWW-Authenticate' origin = request.headers.get('Origin') headers['Access-Control-Allow-Origin']", "g.headers status = 501 return make_response('', status, headers) def move(self, pathname): status =", "back = request.args.get('back_url') info = generate_cookie_info(origin=origin) response.set_cookie(key, value=s.get_signature(info), max_age=None, expires=None, path='/', domain=None, secure=True,", "pathname, app.fs_handler, request.headers.get('Depth', 'infinity'), self.get_body()) try: response = make_response(pf.create_response() + '\\n', status, headers)", "\\ urlparse.urljoin(request.url_root, URI_BEGINNING_PATH['authorization']) + '?sig=' + \\ s.get_signature(origin) + '{&back_url,origin}' response = make_response('',", "URI_BEGINNING_PATH['authorization']) + '?sig=' + \\ s.get_signature(origin) + '{&back_url,origin}' response = make_response('', 401, headers)", "expected_cookie_content == cookie_value: is_correct = True return is_correct def is_authorized(): origin = request.headers.get('Origin')", "request.method == 'POST': response = make_response() if request.form.get('continue') != 'true': generate_key() s =", "if request.form.get('continue') != 
'true': generate_key() s = Signer(app.secret_key) if s.get_signature(origin) == request.args.get('sig'): key", "0 if not request_data and length: try: request_data = request.form.items()[0][0] except IndexError: request_data", "None return request_data def get(self, pathname): status = g.status headers = g.headers status", "= 501 return make_response('', status, headers) def move(self, pathname): status = g.status headers", "headers) except IOError, e: response = make_response('Not found', 404, headers) return response def", "s.get_signature(origin) == request.args.get('sig'): key = base64_encode(str(origin)) back = request.args.get('back_url') info = generate_cookie_info(origin=origin) response.set_cookie(key,", "pathname): return make_response('', g.status, g.headers) weeb_view = weeb.as_view('dav') app.add_url_rule( '/weeb/wtf/', defaults={'pathname': ''}, view_func=weeb_view", "make_response('', 401, headers) return response g.status = status_code g.headers = headers class weeb(MethodView):", "length = int(request.headers.get('Content-length')) except ValueError: length = 0 if not request_data and length:", "status = 501 return make_response('', status, headers) def put(self, pathname): status = g.status", "Flask, request, render_template, make_response, g, Response from flask.views import MethodView import urlparse import", "self.get_body()) try: response = make_response(pf.create_response() + '\\n', status, headers) except IOError, e: response", "back else back else: response = make_response(render_template('authorization_page.html', cookie_list=[ base64_decode(cookey) for cookey in request.cookies.keys()", "response g.status = status_code g.headers = headers class weeb(MethodView): methods = ['GET', 'PUT',", "g, Response from flask.views import MethodView import urlparse import shutil import utils import", "= 501 return make_response('', status, headers) def propfind(self, pathname): status = g.status headers", "is_authorized(): origin = 
request.headers.get('Origin') if origin is None: return True return verify_cookie(base64_encode(origin)) @app.before_request", "def move(self, pathname): status = g.status headers = g.headers status = 501 return", "int(request.headers.get('Content-length')) except ValueError: length = 0 if not request_data and length: try: request_data", "is_correct = True return is_correct def is_authorized(): origin = request.headers.get('Origin') if origin is", "'Something went wrong...' response.status = '301' # response.headers['Location'] = '/' if not back", "= request.args.get('origin') if request.method == 'POST': response = make_response() if request.form.get('continue') != 'true':", "except IndexError: request_data = None return request_data def get(self, pathname): status = g.status", "__init__(self): self.baseuri = URI_BEGINNING_PATH['weeb'] def get_body(self): request_data = request.data try: length = int(request.headers.get('Content-length'))", "response = make_response('', 200, headers) return response else: s = Signer(app.secret_key) headers['WWW-Authenticate'] =", "= status_code g.headers = headers class weeb(MethodView): methods = ['GET', 'PUT', 'PROPFIND', 'DELETE','COPY',", "methods = ['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS'] def __init__(self): self.baseuri = URI_BEGINNING_PATH['weeb']", "g.headers status = 501 return make_response('', status, headers) def put(self, pathname): status =", "= '3600' headers['Access-Control-Allow-Credentials'] = 'true' headers['Access-Control-Allow-Headers'] = \\ 'Origin, Accept, Accept-Encoding, Content-Length, '", "request.args.get('origin') if request.method == 'POST': response = make_response() if request.form.get('continue') != 'true': generate_key()", "= make_response() if request.form.get('continue') != 'true': generate_key() s = Signer(app.secret_key) if s.get_signature(origin) ==", "URI_BEGINNING_PATH['weeb'] def get_body(self): request_data = request.data try: length = 
int(request.headers.get('Content-length')) except ValueError: length", "'OPTIONS'] def __init__(self): self.baseuri = URI_BEGINNING_PATH['weeb'] def get_body(self): request_data = request.data try: length", "if verify_cookie(cookey) ], origin=request.args.get('origin'), back_url=request.args.get('back_url'))) return response if __name__ == '__main__': app.fs_path =", "def is_authorized(): origin = request.headers.get('Origin') if origin is None: return True return verify_cookie(base64_encode(origin))", "200, headers) return response else: s = Signer(app.secret_key) headers['WWW-Authenticate'] = 'Nayookie login_url=' +", "= os.urandom(24) def generate_cookie_info(origin=None): if not origin: origin = request.headers.get('Origin') useragent = request.headers.get('User-Agent')", "return True return verify_cookie(base64_encode(origin)) @app.before_request def before_request(): headers = {} headers['Access-Control-Max-Age'] = '3600'", "def before_request(): headers = {} headers['Access-Control-Max-Age'] = '3600' headers['Access-Control-Allow-Credentials'] = 'true' headers['Access-Control-Allow-Headers'] =", "response = make_response() if request.form.get('continue') != 'true': generate_key() s = Signer(app.secret_key) if s.get_signature(origin)", "['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS'] def __init__(self): self.baseuri = URI_BEGINNING_PATH['weeb'] def get_body(self):", "g.headers status = 501 return make_response('', status, headers) def copy(self, pathname): status =", "headers['Access-Control-Allow-Credentials'] = 'true' headers['Access-Control-Allow-Headers'] = \\ 'Origin, Accept, Accept-Encoding, Content-Length, ' + \\", "not request_data and length: try: request_data = request.form.items()[0][0] except IndexError: request_data = None", "= 501 return make_response('', status, headers) def put(self, pathname): status = g.status headers", "return make_response('', status, headers) def move(self, pathname): status = g.status headers = g.headers", 
"status_code g.headers = headers class weeb(MethodView): methods = ['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE',", "== 'OPTIONS' and specific_header: headers['Access-Control-Request-Headers'] = specific_header headers['Access-Control-Allow-Methods'] = ', '.join(['GET', 'PUT', 'PROPFIND',", "'Origin, Accept, Accept-Encoding, Content-Length, ' + \\ 'Content-Type, Authorization, Depth, If-Modified-Since, '+ \\", "return 'Something went wrong...' response.status = '301' # response.headers['Location'] = '/' if not", "], origin=request.args.get('origin'), back_url=request.args.get('back_url'))) return response if __name__ == '__main__': app.fs_path = '/app/' app.fs_handler", "401, headers) return response g.status = status_code g.headers = headers class weeb(MethodView): methods", "Content-Length, ' + \\ 'Content-Type, Authorization, Depth, If-Modified-Since, '+ \\ 'If-None-Match' headers['Access-Control-Expose-Headers'] =", "= '/' if not back else back else: response = make_response(render_template('authorization_page.html', cookie_list=[ base64_decode(cookey)", "domain=None, secure=True, httponly=True) else: return 'Something went wrong...' 
response.status = '301' # response.headers['Location']", "if expected_cookie_content == cookie_value: is_correct = True return is_correct def is_authorized(): origin =", "URI_BEGINNING_PATH['weeb'] + '<path:pathname>', view_func=weeb_view ) @app.route(URI_BEGINNING_PATH['authorization'], methods=['GET', 'POST']) def authorize(): origin = request.args.get('origin')", "if origin is None: return True return verify_cookie(base64_encode(origin)) @app.before_request def before_request(): headers =", "None: return True return verify_cookie(base64_encode(origin)) @app.before_request def before_request(): headers = {} headers['Access-Control-Max-Age'] =", "(str(origin), str(useragent)) def verify_cookie(cookey): is_correct = False cookie_value = request.cookies.get(cookey) if cookie_value: s", "return is_correct def is_authorized(): origin = request.headers.get('Origin') if origin is None: return True", "status = 501 return make_response('', status, headers) def propfind(self, pathname): status = g.status", "import utils import os import mimetypes app = Flask(__name__.split('.')[0]) app.config.from_object(__name__) BUFFER_SIZE = 128000", "g.status headers = g.headers status = 501 return make_response('', status, headers) def put(self,", "BUFFER_SIZE = 128000 URI_BEGINNING_PATH = { 'authorization': '/login/', 'weeb': '/weeb/wtf/', } def generate_key():", "authorize(): origin = request.args.get('origin') if request.method == 'POST': response = make_response() if request.form.get('continue')", "origin=request.args.get('origin'), back_url=request.args.get('back_url'))) return response if __name__ == '__main__': app.fs_path = '/app/' app.fs_handler =", "g.status headers = g.headers status = 501 return make_response('', status, headers) def copy(self,", "= request.form.items()[0][0] except IndexError: request_data = None return request_data def get(self, pathname): status", "is_correct def is_authorized(): origin = request.headers.get('Origin') if origin is None: return True 
return", "make_response(render_template('authorization_page.html', cookie_list=[ base64_decode(cookey) for cookey in request.cookies.keys() if verify_cookie(cookey) ], origin=request.args.get('origin'), back_url=request.args.get('back_url'))) return", "import mimetypes app = Flask(__name__.split('.')[0]) app.config.from_object(__name__) BUFFER_SIZE = 128000 URI_BEGINNING_PATH = { 'authorization':", "app.add_url_rule( '/weeb/wtf/', defaults={'pathname': ''}, view_func=weeb_view ) app.add_url_rule( URI_BEGINNING_PATH['weeb'] + '<path:pathname>', view_func=weeb_view ) @app.route(URI_BEGINNING_PATH['authorization'],", "'/weeb/wtf/', defaults={'pathname': ''}, view_func=weeb_view ) app.add_url_rule( URI_BEGINNING_PATH['weeb'] + '<path:pathname>', view_func=weeb_view ) @app.route(URI_BEGINNING_PATH['authorization'], methods=['GET',", "urlparse import shutil import utils import os import mimetypes app = Flask(__name__.split('.')[0]) app.config.from_object(__name__)", "response = make_response('Not found', 404, headers) return response def delete(self, pathname): status =", "Response from flask.views import MethodView import urlparse import shutil import utils import os", "back else: response = make_response(render_template('authorization_page.html', cookie_list=[ base64_decode(cookey) for cookey in request.cookies.keys() if verify_cookie(cookey)", "return make_response('', status, headers) def copy(self, pathname): status = g.status headers = g.headers", "= request.args.get('back_url') info = generate_cookie_info(origin=origin) response.set_cookie(key, value=s.get_signature(info), max_age=None, expires=None, path='/', domain=None, secure=True, httponly=True)", "'authorization': '/login/', 'weeb': '/weeb/wtf/', } def generate_key(): app.secret_key = os.urandom(24) def generate_cookie_info(origin=None): if", "200 elif request.method == 'OPTIONS' and specific_header: headers['Access-Control-Request-Headers'] = specific_header headers['Access-Control-Allow-Methods'] = ',", 
"'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS'] def __init__(self): self.baseuri = URI_BEGINNING_PATH['weeb'] def get_body(self): request_data", "wrong...' response.status = '301' # response.headers['Location'] = '/' if not back else back", "utils import os import mimetypes app = Flask(__name__.split('.')[0]) app.config.from_object(__name__) BUFFER_SIZE = 128000 URI_BEGINNING_PATH", "render_template, make_response, g, Response from flask.views import MethodView import urlparse import shutil import", "def __init__(self): self.baseuri = URI_BEGINNING_PATH['weeb'] def get_body(self): request_data = request.data try: length =", "cookie_value: s = Signer(app.secret_key) expected_cookie_content = \\ generate_cookie_info(base64_decode(cookey)) expected_cookie_content = s.get_signature(expected_cookie_content) if expected_cookie_content", "else back else: response = make_response(render_template('authorization_page.html', cookie_list=[ base64_decode(cookey) for cookey in request.cookies.keys() if", "make_response('', status, headers) def options(self, pathname): return make_response('', g.status, g.headers) weeb_view = weeb.as_view('dav')", "= g.headers pf = utils.PropfindProcessor( URI_BEGINNING_PATH['weeb'] + pathname, app.fs_handler, request.headers.get('Depth', 'infinity'), self.get_body()) try:", "= True return is_correct def is_authorized(): origin = request.headers.get('Origin') if origin is None:", "'OPTIONS']) response = make_response('', 200, headers) return response else: s = Signer(app.secret_key) headers['WWW-Authenticate']", "501 return make_response('', status, headers) def move(self, pathname): status = g.status headers =", "''}, view_func=weeb_view ) app.add_url_rule( URI_BEGINNING_PATH['weeb'] + '<path:pathname>', view_func=weeb_view ) @app.route(URI_BEGINNING_PATH['authorization'], methods=['GET', 'POST']) def", "@app.before_request def before_request(): headers = {} headers['Access-Control-Max-Age'] = '3600' 
headers['Access-Control-Allow-Credentials'] = 'true' headers['Access-Control-Allow-Headers']", "URI_BEGINNING_PATH['weeb'] + pathname, app.fs_handler, request.headers.get('Depth', 'infinity'), self.get_body()) try: response = make_response(pf.create_response() + '\\n',", "'Content-Type, Authorization, Depth, If-Modified-Since, '+ \\ 'If-None-Match' headers['Access-Control-Expose-Headers'] = \\ 'Content-Type, Last-Modified, WWW-Authenticate'", "\\ generate_cookie_info(base64_decode(cookey)) expected_cookie_content = s.get_signature(expected_cookie_content) if expected_cookie_content == cookie_value: is_correct = True return", "= URI_BEGINNING_PATH['weeb'] def get_body(self): request_data = request.data try: length = int(request.headers.get('Content-length')) except ValueError:", "request.headers.get('Access-Control-Request-Headers') if is_authorized(): status_code = 200 elif request.method == 'OPTIONS' and specific_header: headers['Access-Control-Request-Headers']", "get_body(self): request_data = request.data try: length = int(request.headers.get('Content-length')) except ValueError: length = 0", "False cookie_value = request.cookies.get(cookey) if cookie_value: s = Signer(app.secret_key) expected_cookie_content = \\ generate_cookie_info(base64_decode(cookey))", "if is_authorized(): status_code = 200 elif request.method == 'OPTIONS' and specific_header: headers['Access-Control-Request-Headers'] =", "'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS']) response = make_response('', 200, headers) return response else: s", "before_request(): headers = {} headers['Access-Control-Max-Age'] = '3600' headers['Access-Control-Allow-Credentials'] = 'true' headers['Access-Control-Allow-Headers'] = \\", "if s.get_signature(origin) == request.args.get('sig'): key = base64_encode(str(origin)) back = request.args.get('back_url') info = generate_cookie_info(origin=origin)", "elif request.method == 'OPTIONS' and specific_header: headers['Access-Control-Request-Headers'] = 
specific_header headers['Access-Control-Allow-Methods'] = ', '.join(['GET',", "\\ s.get_signature(origin) + '{&back_url,origin}' response = make_response('', 401, headers) return response g.status =", "def put(self, pathname): status = g.status headers = g.headers status = 501 return", "headers['Access-Control-Allow-Origin'] = origin specific_header = request.headers.get('Access-Control-Request-Headers') if is_authorized(): status_code = 200 elif request.method", "headers = g.headers status = 501 return make_response('', status, headers) def copy(self, pathname):", "status, headers) def copy(self, pathname): status = g.status headers = g.headers status =", "status = 501 return make_response('', status, headers) def options(self, pathname): return make_response('', g.status,", "s.get_signature(origin) + '{&back_url,origin}' response = make_response('', 401, headers) return response g.status = status_code", "weeb(MethodView): methods = ['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS'] def __init__(self): self.baseuri =", "delete(self, pathname): status = g.status headers = g.headers status = 501 return make_response('',", "get(self, pathname): status = g.status headers = g.headers status = 501 return make_response('',", "= g.status headers = g.headers pf = utils.PropfindProcessor( URI_BEGINNING_PATH['weeb'] + pathname, app.fs_handler, request.headers.get('Depth',", "request_data def get(self, pathname): status = g.status headers = g.headers status = 501", "view_func=weeb_view ) @app.route(URI_BEGINNING_PATH['authorization'], methods=['GET', 'POST']) def authorize(): origin = request.args.get('origin') if request.method ==", "URI_BEGINNING_PATH = { 'authorization': '/login/', 'weeb': '/weeb/wtf/', } def generate_key(): app.secret_key = os.urandom(24)", "IOError, e: response = make_response('Not found', 404, headers) return response def delete(self, pathname):", "app = Flask(__name__.split('.')[0]) app.config.from_object(__name__) BUFFER_SIZE = 128000 
URI_BEGINNING_PATH = { 'authorization': '/login/', 'weeb':", "= g.headers status = 501 return make_response('', status, headers) def copy(self, pathname): status", "origin = request.headers.get('Origin') headers['Access-Control-Allow-Origin'] = origin specific_header = request.headers.get('Access-Control-Request-Headers') if is_authorized(): status_code =", "+ '{&back_url,origin}' response = make_response('', 401, headers) return response g.status = status_code g.headers", ") app.add_url_rule( URI_BEGINNING_PATH['weeb'] + '<path:pathname>', view_func=weeb_view ) @app.route(URI_BEGINNING_PATH['authorization'], methods=['GET', 'POST']) def authorize(): origin", "import MethodView import urlparse import shutil import utils import os import mimetypes app", "copy(self, pathname): status = g.status headers = g.headers status = 501 return make_response('',", "app.add_url_rule( URI_BEGINNING_PATH['weeb'] + '<path:pathname>', view_func=weeb_view ) @app.route(URI_BEGINNING_PATH['authorization'], methods=['GET', 'POST']) def authorize(): origin =", "return response g.status = status_code g.headers = headers class weeb(MethodView): methods = ['GET',", "g.headers pf = utils.PropfindProcessor( URI_BEGINNING_PATH['weeb'] + pathname, app.fs_handler, request.headers.get('Depth', 'infinity'), self.get_body()) try: response", "g.status, g.headers) weeb_view = weeb.as_view('dav') app.add_url_rule( '/weeb/wtf/', defaults={'pathname': ''}, view_func=weeb_view ) app.add_url_rule( URI_BEGINNING_PATH['weeb']", "!= 'true': generate_key() s = Signer(app.secret_key) if s.get_signature(origin) == request.args.get('sig'): key = base64_encode(str(origin))", "response def delete(self, pathname): status = g.status headers = g.headers status = 501", "= 'Nayookie login_url=' + \\ urlparse.urljoin(request.url_root, URI_BEGINNING_PATH['authorization']) + '?sig=' + \\ s.get_signature(origin) +", "return response def delete(self, pathname): status = g.status headers = g.headers status =", "{ 
'authorization': '/login/', 'weeb': '/weeb/wtf/', } def generate_key(): app.secret_key = os.urandom(24) def generate_cookie_info(origin=None):", "request.headers.get('Origin') headers['Access-Control-Allow-Origin'] = origin specific_header = request.headers.get('Access-Control-Request-Headers') if is_authorized(): status_code = 200 elif", "'\\n', status, headers) except IOError, e: response = make_response('Not found', 404, headers) return", "'?sig=' + \\ s.get_signature(origin) + '{&back_url,origin}' response = make_response('', 401, headers) return response", "else: response = make_response(render_template('authorization_page.html', cookie_list=[ base64_decode(cookey) for cookey in request.cookies.keys() if verify_cookie(cookey) ],", "if not request_data and length: try: request_data = request.form.items()[0][0] except IndexError: request_data =", "'Content-Type, Last-Modified, WWW-Authenticate' origin = request.headers.get('Origin') headers['Access-Control-Allow-Origin'] = origin specific_header = request.headers.get('Access-Control-Request-Headers') if", "headers = g.headers pf = utils.PropfindProcessor( URI_BEGINNING_PATH['weeb'] + pathname, app.fs_handler, request.headers.get('Depth', 'infinity'), self.get_body())", "return verify_cookie(base64_encode(origin)) @app.before_request def before_request(): headers = {} headers['Access-Control-Max-Age'] = '3600' headers['Access-Control-Allow-Credentials'] =", "Depth, If-Modified-Since, '+ \\ 'If-None-Match' headers['Access-Control-Expose-Headers'] = \\ 'Content-Type, Last-Modified, WWW-Authenticate' origin =", "= g.headers status = 501 return make_response('', status, headers) def propfind(self, pathname): status", "= request.headers.get('Access-Control-Request-Headers') if is_authorized(): status_code = 200 elif request.method == 'OPTIONS' and specific_header:", "pathname): status = g.status headers = g.headers status = 501 return make_response('', status,", "else: return 'Something went wrong...' 
response.status = '301' # response.headers['Location'] = '/' if", ") @app.route(URI_BEGINNING_PATH['authorization'], methods=['GET', 'POST']) def authorize(): origin = request.args.get('origin') if request.method == 'POST':", "generate_cookie_info(base64_decode(cookey)) expected_cookie_content = s.get_signature(expected_cookie_content) if expected_cookie_content == cookie_value: is_correct = True return is_correct", "pf = utils.PropfindProcessor( URI_BEGINNING_PATH['weeb'] + pathname, app.fs_handler, request.headers.get('Depth', 'infinity'), self.get_body()) try: response =", "not back else back else: response = make_response(render_template('authorization_page.html', cookie_list=[ base64_decode(cookey) for cookey in", "Signer(app.secret_key) if s.get_signature(origin) == request.args.get('sig'): key = base64_encode(str(origin)) back = request.args.get('back_url') info =", "return response if __name__ == '__main__': app.fs_path = '/app/' app.fs_handler = utils.FilesystemHandler(app.fs_path, URI_BEGINNING_PATH['weeb'])", "'If-None-Match' headers['Access-Control-Expose-Headers'] = \\ 'Content-Type, Last-Modified, WWW-Authenticate' origin = request.headers.get('Origin') headers['Access-Control-Allow-Origin'] = origin", "max_age=None, expires=None, path='/', domain=None, secure=True, httponly=True) else: return 'Something went wrong...' 
response.status =", "= g.status headers = g.headers status = 501 return make_response('', status, headers) def", "= make_response(render_template('authorization_page.html', cookie_list=[ base64_decode(cookey) for cookey in request.cookies.keys() if verify_cookie(cookey) ], origin=request.args.get('origin'), back_url=request.args.get('back_url')))", "headers['Access-Control-Allow-Methods'] = ', '.join(['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS']) response = make_response('', 200,", "'{&back_url,origin}' response = make_response('', 401, headers) return response g.status = status_code g.headers =", "request_data and length: try: request_data = request.form.items()[0][0] except IndexError: request_data = None return", "origin: origin = request.headers.get('Origin') useragent = request.headers.get('User-Agent') return '%s %s' % (str(origin), str(useragent))", "app.secret_key = os.urandom(24) def generate_cookie_info(origin=None): if not origin: origin = request.headers.get('Origin') useragent =", "if cookie_value: s = Signer(app.secret_key) expected_cookie_content = \\ generate_cookie_info(base64_decode(cookey)) expected_cookie_content = s.get_signature(expected_cookie_content) if", "expected_cookie_content = \\ generate_cookie_info(base64_decode(cookey)) expected_cookie_content = s.get_signature(expected_cookie_content) if expected_cookie_content == cookie_value: is_correct =", "verify_cookie(cookey): is_correct = False cookie_value = request.cookies.get(cookey) if cookie_value: s = Signer(app.secret_key) expected_cookie_content", "os import mimetypes app = Flask(__name__.split('.')[0]) app.config.from_object(__name__) BUFFER_SIZE = 128000 URI_BEGINNING_PATH = {", "in request.cookies.keys() if verify_cookie(cookey) ], origin=request.args.get('origin'), back_url=request.args.get('back_url'))) return response if __name__ == '__main__':", "'MOVE', 'OPTIONS']) response = make_response('', 200, headers) return response else: s = Signer(app.secret_key)", 
"def authorize(): origin = request.args.get('origin') if request.method == 'POST': response = make_response() if", "= g.headers status = 501 return make_response('', status, headers) def options(self, pathname): return", "Flask(__name__.split('.')[0]) app.config.from_object(__name__) BUFFER_SIZE = 128000 URI_BEGINNING_PATH = { 'authorization': '/login/', 'weeb': '/weeb/wtf/', }", "\\ 'If-None-Match' headers['Access-Control-Expose-Headers'] = \\ 'Content-Type, Last-Modified, WWW-Authenticate' origin = request.headers.get('Origin') headers['Access-Control-Allow-Origin'] =", "'301' # response.headers['Location'] = '/' if not back else back else: response =", "= utils.PropfindProcessor( URI_BEGINNING_PATH['weeb'] + pathname, app.fs_handler, request.headers.get('Depth', 'infinity'), self.get_body()) try: response = make_response(pf.create_response()", "%s' % (str(origin), str(useragent)) def verify_cookie(cookey): is_correct = False cookie_value = request.cookies.get(cookey) if", "return response else: s = Signer(app.secret_key) headers['WWW-Authenticate'] = 'Nayookie login_url=' + \\ urlparse.urljoin(request.url_root,", "if request.method == 'POST': response = make_response() if request.form.get('continue') != 'true': generate_key() s", "= \\ 'Origin, Accept, Accept-Encoding, Content-Length, ' + \\ 'Content-Type, Authorization, Depth, If-Modified-Since,", "= 200 elif request.method == 'OPTIONS' and specific_header: headers['Access-Control-Request-Headers'] = specific_header headers['Access-Control-Allow-Methods'] =", "import shutil import utils import os import mimetypes app = Flask(__name__.split('.')[0]) app.config.from_object(__name__) BUFFER_SIZE", "'.join(['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS']) response = make_response('', 200, headers) return response", "make_response('', 200, headers) return response else: s = Signer(app.secret_key) headers['WWW-Authenticate'] = 'Nayookie login_url='", "request.headers.get('Origin') useragent = 
request.headers.get('User-Agent') return '%s %s' % (str(origin), str(useragent)) def verify_cookie(cookey): is_correct", "g.headers = headers class weeb(MethodView): methods = ['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS']", "= None return request_data def get(self, pathname): status = g.status headers = g.headers", "from flask.views import MethodView import urlparse import shutil import utils import os import", "headers['WWW-Authenticate'] = 'Nayookie login_url=' + \\ urlparse.urljoin(request.url_root, URI_BEGINNING_PATH['authorization']) + '?sig=' + \\ s.get_signature(origin)", "+ '<path:pathname>', view_func=weeb_view ) @app.route(URI_BEGINNING_PATH['authorization'], methods=['GET', 'POST']) def authorize(): origin = request.args.get('origin') if", "itsdangerous import Signer, base64_encode, base64_decode from flask import Flask, request, render_template, make_response, g,", "weeb_view = weeb.as_view('dav') app.add_url_rule( '/weeb/wtf/', defaults={'pathname': ''}, view_func=weeb_view ) app.add_url_rule( URI_BEGINNING_PATH['weeb'] + '<path:pathname>',", "make_response('', g.status, g.headers) weeb_view = weeb.as_view('dav') app.add_url_rule( '/weeb/wtf/', defaults={'pathname': ''}, view_func=weeb_view ) app.add_url_rule(", "Signer(app.secret_key) headers['WWW-Authenticate'] = 'Nayookie login_url=' + \\ urlparse.urljoin(request.url_root, URI_BEGINNING_PATH['authorization']) + '?sig=' + \\", "= Signer(app.secret_key) if s.get_signature(origin) == request.args.get('sig'): key = base64_encode(str(origin)) back = request.args.get('back_url') info", "headers['Access-Control-Allow-Headers'] = \\ 'Origin, Accept, Accept-Encoding, Content-Length, ' + \\ 'Content-Type, Authorization, Depth,", "expires=None, path='/', domain=None, secure=True, httponly=True) else: return 'Something went wrong...' 
response.status = '301'", "g.headers) weeb_view = weeb.as_view('dav') app.add_url_rule( '/weeb/wtf/', defaults={'pathname': ''}, view_func=weeb_view ) app.add_url_rule( URI_BEGINNING_PATH['weeb'] +", "501 return make_response('', status, headers) def put(self, pathname): status = g.status headers =", "app.fs_handler, request.headers.get('Depth', 'infinity'), self.get_body()) try: response = make_response(pf.create_response() + '\\n', status, headers) except", "@app.route(URI_BEGINNING_PATH['authorization'], methods=['GET', 'POST']) def authorize(): origin = request.args.get('origin') if request.method == 'POST': response", "move(self, pathname): status = g.status headers = g.headers status = 501 return make_response('',", "and specific_header: headers['Access-Control-Request-Headers'] = specific_header headers['Access-Control-Allow-Methods'] = ', '.join(['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE',", "try: response = make_response(pf.create_response() + '\\n', status, headers) except IOError, e: response =", "verify_cookie(cookey) ], origin=request.args.get('origin'), back_url=request.args.get('back_url'))) return response if __name__ == '__main__': app.fs_path = '/app/'", "if not origin: origin = request.headers.get('Origin') useragent = request.headers.get('User-Agent') return '%s %s' %", "headers) def propfind(self, pathname): status = g.status headers = g.headers pf = utils.PropfindProcessor(", "from itsdangerous import Signer, base64_encode, base64_decode from flask import Flask, request, render_template, make_response,", "secure=True, httponly=True) else: return 'Something went wrong...' 
response.status = '301' # response.headers['Location'] =", "request.form.items()[0][0] except IndexError: request_data = None return request_data def get(self, pathname): status =", "request.form.get('continue') != 'true': generate_key() s = Signer(app.secret_key) if s.get_signature(origin) == request.args.get('sig'): key =", "'Nayookie login_url=' + \\ urlparse.urljoin(request.url_root, URI_BEGINNING_PATH['authorization']) + '?sig=' + \\ s.get_signature(origin) + '{&back_url,origin}'", "= base64_encode(str(origin)) back = request.args.get('back_url') info = generate_cookie_info(origin=origin) response.set_cookie(key, value=s.get_signature(info), max_age=None, expires=None, path='/',", "'true' headers['Access-Control-Allow-Headers'] = \\ 'Origin, Accept, Accept-Encoding, Content-Length, ' + \\ 'Content-Type, Authorization,", "headers['Access-Control-Expose-Headers'] = \\ 'Content-Type, Last-Modified, WWW-Authenticate' origin = request.headers.get('Origin') headers['Access-Control-Allow-Origin'] = origin specific_header", "return request_data def get(self, pathname): status = g.status headers = g.headers status =", "and length: try: request_data = request.form.items()[0][0] except IndexError: request_data = None return request_data", "status = g.status headers = g.headers status = 501 return make_response('', status, headers)", "= request.cookies.get(cookey) if cookie_value: s = Signer(app.secret_key) expected_cookie_content = \\ generate_cookie_info(base64_decode(cookey)) expected_cookie_content =", "return make_response('', status, headers) def options(self, pathname): return make_response('', g.status, g.headers) weeb_view =", "try: length = int(request.headers.get('Content-length')) except ValueError: length = 0 if not request_data and", "generate_cookie_info(origin=origin) response.set_cookie(key, value=s.get_signature(info), max_age=None, expires=None, path='/', domain=None, secure=True, httponly=True) else: return 'Something went", "class weeb(MethodView): 
methods = ['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS'] def __init__(self): self.baseuri", "key = base64_encode(str(origin)) back = request.args.get('back_url') info = generate_cookie_info(origin=origin) response.set_cookie(key, value=s.get_signature(info), max_age=None, expires=None,", "s = Signer(app.secret_key) headers['WWW-Authenticate'] = 'Nayookie login_url=' + \\ urlparse.urljoin(request.url_root, URI_BEGINNING_PATH['authorization']) + '?sig='", "login_url=' + \\ urlparse.urljoin(request.url_root, URI_BEGINNING_PATH['authorization']) + '?sig=' + \\ s.get_signature(origin) + '{&back_url,origin}' response", "value=s.get_signature(info), max_age=None, expires=None, path='/', domain=None, secure=True, httponly=True) else: return 'Something went wrong...' response.status", "cookie_list=[ base64_decode(cookey) for cookey in request.cookies.keys() if verify_cookie(cookey) ], origin=request.args.get('origin'), back_url=request.args.get('back_url'))) return response", "MethodView import urlparse import shutil import utils import os import mimetypes app =" ]
[ "name, reasons=[]): if len(reasons): feature['properties']['osmcha_reasons'] = \", \".join([i for i in reasons]) payload", "i in reasons]) payload = { \"parent\": challenge_id, \"name\": \"{}\".format(name), \"geometries\": {\"features\": [remove_unneeded_properties(feature)]}", "in feature['properties'].keys() if key.startswith('osm:') or key.startswith('result:') ] for key in keys_to_remove: feature['properties'].pop(key) if", "key in keys_to_remove: feature['properties'].pop(key) if feature['properties'].get('oldVersion'): feature['properties'].pop('oldVersion') if feature['properties'].get('suspicions'): feature['properties'].pop('suspicions') return feature def", "reasons=[]): if len(reasons): feature['properties']['osmcha_reasons'] = \", \".join([i for i in reasons]) payload =", "not None and settings.MAP_ROULETTE_API_URL is not None): payload = format_challenge_task_payload( feature, challenge_id, name,", "headers = { \"Content-Type\": \"application/json\", \"apiKey\": settings.MAP_ROULETTE_API_KEY } return requests.post( join(settings.MAP_ROULETTE_API_URL, 'task'), headers=headers,", "format_challenge_task_payload( feature, challenge_id, name, reasons ) headers = { \"Content-Type\": \"application/json\", \"apiKey\": settings.MAP_ROULETTE_API_KEY", "if (settings.MAP_ROULETTE_API_KEY is not None and settings.MAP_ROULETTE_API_URL is not None): payload = format_challenge_task_payload(", "feature['properties'].get('oldVersion'): feature['properties'].pop('oldVersion') if feature['properties'].get('suspicions'): feature['properties'].pop('suspicions') return feature def format_challenge_task_payload(feature, challenge_id, name, reasons=[]): if", "def remove_unneeded_properties(feature): keys_to_remove = [ key for key in feature['properties'].keys() if key.startswith('osm:') or", "def format_challenge_task_payload(feature, challenge_id, name, reasons=[]): if len(reasons): feature['properties']['osmcha_reasons'] = \", \".join([i for i", "in reasons]) payload = { 
\"parent\": challenge_id, \"name\": \"{}\".format(name), \"geometries\": {\"features\": [remove_unneeded_properties(feature)]} }", "] for key in keys_to_remove: feature['properties'].pop(key) if feature['properties'].get('oldVersion'): feature['properties'].pop('oldVersion') if feature['properties'].get('suspicions'): feature['properties'].pop('suspicions') return", "remove_unneeded_properties(feature): keys_to_remove = [ key for key in feature['properties'].keys() if key.startswith('osm:') or key.startswith('result:')", "settings def remove_unneeded_properties(feature): keys_to_remove = [ key for key in feature['properties'].keys() if key.startswith('osm:')", "feature['properties']['osmcha_reasons'] = \", \".join([i for i in reasons]) payload = { \"parent\": challenge_id,", "if feature['properties'].get('suspicions'): feature['properties'].pop('suspicions') return feature def format_challenge_task_payload(feature, challenge_id, name, reasons=[]): if len(reasons): feature['properties']['osmcha_reasons']", "= [ key for key in feature['properties'].keys() if key.startswith('osm:') or key.startswith('result:') ] for", "reasons]) payload = { \"parent\": challenge_id, \"name\": \"{}\".format(name), \"geometries\": {\"features\": [remove_unneeded_properties(feature)]} } return", "json.dumps(payload) def push_feature_to_maproulette(feature, challenge_id, name, reasons=[]): if (settings.MAP_ROULETTE_API_KEY is not None and settings.MAP_ROULETTE_API_URL", "(settings.MAP_ROULETTE_API_KEY is not None and settings.MAP_ROULETTE_API_URL is not None): payload = format_challenge_task_payload( feature,", "\".join([i for i in reasons]) payload = { \"parent\": challenge_id, \"name\": \"{}\".format(name), \"geometries\":", "\"geometries\": {\"features\": [remove_unneeded_properties(feature)]} } return json.dumps(payload) def push_feature_to_maproulette(feature, challenge_id, name, reasons=[]): if (settings.MAP_ROULETTE_API_KEY", "= { \"parent\": challenge_id, \"name\": 
\"{}\".format(name), \"geometries\": {\"features\": [remove_unneeded_properties(feature)]} } return json.dumps(payload) def", "return json.dumps(payload) def push_feature_to_maproulette(feature, challenge_id, name, reasons=[]): if (settings.MAP_ROULETTE_API_KEY is not None and", "feature['properties'].pop('oldVersion') if feature['properties'].get('suspicions'): feature['properties'].pop('suspicions') return feature def format_challenge_task_payload(feature, challenge_id, name, reasons=[]): if len(reasons):", "None): payload = format_challenge_task_payload( feature, challenge_id, name, reasons ) headers = { \"Content-Type\":", "feature['properties'].pop(key) if feature['properties'].get('oldVersion'): feature['properties'].pop('oldVersion') if feature['properties'].get('suspicions'): feature['properties'].pop('suspicions') return feature def format_challenge_task_payload(feature, challenge_id, name,", "if feature['properties'].get('oldVersion'): feature['properties'].pop('oldVersion') if feature['properties'].get('suspicions'): feature['properties'].pop('suspicions') return feature def format_challenge_task_payload(feature, challenge_id, name, reasons=[]):", "and settings.MAP_ROULETTE_API_URL is not None): payload = format_challenge_task_payload( feature, challenge_id, name, reasons )", "\", \".join([i for i in reasons]) payload = { \"parent\": challenge_id, \"name\": \"{}\".format(name),", "reasons ) headers = { \"Content-Type\": \"application/json\", \"apiKey\": settings.MAP_ROULETTE_API_KEY } return requests.post( join(settings.MAP_ROULETTE_API_URL,", "} return json.dumps(payload) def push_feature_to_maproulette(feature, challenge_id, name, reasons=[]): if (settings.MAP_ROULETTE_API_KEY is not None", "payload = format_challenge_task_payload( feature, challenge_id, name, reasons ) headers = { \"Content-Type\": \"application/json\",", "challenge_id, \"name\": \"{}\".format(name), \"geometries\": {\"features\": [remove_unneeded_properties(feature)]} } return 
json.dumps(payload) def push_feature_to_maproulette(feature, challenge_id, name,", "def push_feature_to_maproulette(feature, challenge_id, name, reasons=[]): if (settings.MAP_ROULETTE_API_KEY is not None and settings.MAP_ROULETTE_API_URL is", "for i in reasons]) payload = { \"parent\": challenge_id, \"name\": \"{}\".format(name), \"geometries\": {\"features\":", "\"name\": \"{}\".format(name), \"geometries\": {\"features\": [remove_unneeded_properties(feature)]} } return json.dumps(payload) def push_feature_to_maproulette(feature, challenge_id, name, reasons=[]):", "{ \"Content-Type\": \"application/json\", \"apiKey\": settings.MAP_ROULETTE_API_KEY } return requests.post( join(settings.MAP_ROULETTE_API_URL, 'task'), headers=headers, data=payload )", "\"parent\": challenge_id, \"name\": \"{}\".format(name), \"geometries\": {\"features\": [remove_unneeded_properties(feature)]} } return json.dumps(payload) def push_feature_to_maproulette(feature, challenge_id,", "challenge_id, name, reasons ) headers = { \"Content-Type\": \"application/json\", \"apiKey\": settings.MAP_ROULETTE_API_KEY } return", "not None): payload = format_challenge_task_payload( feature, challenge_id, name, reasons ) headers = {", "or key.startswith('result:') ] for key in keys_to_remove: feature['properties'].pop(key) if feature['properties'].get('oldVersion'): feature['properties'].pop('oldVersion') if feature['properties'].get('suspicions'):", "[ key for key in feature['properties'].keys() if key.startswith('osm:') or key.startswith('result:') ] for key", "import json from os.path import join import requests from django.conf import settings def", "key for key in feature['properties'].keys() if key.startswith('osm:') or key.startswith('result:') ] for key in", "{ \"parent\": challenge_id, \"name\": \"{}\".format(name), \"geometries\": {\"features\": [remove_unneeded_properties(feature)]} } return json.dumps(payload) def push_feature_to_maproulette(feature,", "\"{}\".format(name), \"geometries\": 
{\"features\": [remove_unneeded_properties(feature)]} } return json.dumps(payload) def push_feature_to_maproulette(feature, challenge_id, name, reasons=[]): if", "settings.MAP_ROULETTE_API_URL is not None): payload = format_challenge_task_payload( feature, challenge_id, name, reasons ) headers", ") headers = { \"Content-Type\": \"application/json\", \"apiKey\": settings.MAP_ROULETTE_API_KEY } return requests.post( join(settings.MAP_ROULETTE_API_URL, 'task'),", "for key in keys_to_remove: feature['properties'].pop(key) if feature['properties'].get('oldVersion'): feature['properties'].pop('oldVersion') if feature['properties'].get('suspicions'): feature['properties'].pop('suspicions') return feature", "in keys_to_remove: feature['properties'].pop(key) if feature['properties'].get('oldVersion'): feature['properties'].pop('oldVersion') if feature['properties'].get('suspicions'): feature['properties'].pop('suspicions') return feature def format_challenge_task_payload(feature,", "reasons=[]): if (settings.MAP_ROULETTE_API_KEY is not None and settings.MAP_ROULETTE_API_URL is not None): payload =", "key.startswith('osm:') or key.startswith('result:') ] for key in keys_to_remove: feature['properties'].pop(key) if feature['properties'].get('oldVersion'): feature['properties'].pop('oldVersion') if", "len(reasons): feature['properties']['osmcha_reasons'] = \", \".join([i for i in reasons]) payload = { \"parent\":", "challenge_id, name, reasons=[]): if (settings.MAP_ROULETTE_API_KEY is not None and settings.MAP_ROULETTE_API_URL is not None):", "import requests from django.conf import settings def remove_unneeded_properties(feature): keys_to_remove = [ key for", "if len(reasons): feature['properties']['osmcha_reasons'] = \", \".join([i for i in reasons]) payload = {", "requests from django.conf import settings def remove_unneeded_properties(feature): keys_to_remove = [ key for key", "None and settings.MAP_ROULETTE_API_URL is not None): payload = format_challenge_task_payload( 
feature, challenge_id, name, reasons", "challenge_id, name, reasons=[]): if len(reasons): feature['properties']['osmcha_reasons'] = \", \".join([i for i in reasons])", "for key in feature['properties'].keys() if key.startswith('osm:') or key.startswith('result:') ] for key in keys_to_remove:", "feature['properties'].pop('suspicions') return feature def format_challenge_task_payload(feature, challenge_id, name, reasons=[]): if len(reasons): feature['properties']['osmcha_reasons'] = \",", "feature['properties'].keys() if key.startswith('osm:') or key.startswith('result:') ] for key in keys_to_remove: feature['properties'].pop(key) if feature['properties'].get('oldVersion'):", "= format_challenge_task_payload( feature, challenge_id, name, reasons ) headers = { \"Content-Type\": \"application/json\", \"apiKey\":", "from django.conf import settings def remove_unneeded_properties(feature): keys_to_remove = [ key for key in", "import join import requests from django.conf import settings def remove_unneeded_properties(feature): keys_to_remove = [", "os.path import join import requests from django.conf import settings def remove_unneeded_properties(feature): keys_to_remove =", "name, reasons ) headers = { \"Content-Type\": \"application/json\", \"apiKey\": settings.MAP_ROULETTE_API_KEY } return requests.post(", "is not None): payload = format_challenge_task_payload( feature, challenge_id, name, reasons ) headers =", "{\"features\": [remove_unneeded_properties(feature)]} } return json.dumps(payload) def push_feature_to_maproulette(feature, challenge_id, name, reasons=[]): if (settings.MAP_ROULETTE_API_KEY is", "is not None and settings.MAP_ROULETTE_API_URL is not None): payload = format_challenge_task_payload( feature, challenge_id,", "if key.startswith('osm:') or key.startswith('result:') ] for key in keys_to_remove: feature['properties'].pop(key) if feature['properties'].get('oldVersion'): feature['properties'].pop('oldVersion')", "key in feature['properties'].keys() if 
key.startswith('osm:') or key.startswith('result:') ] for key in keys_to_remove: feature['properties'].pop(key)", "payload = { \"parent\": challenge_id, \"name\": \"{}\".format(name), \"geometries\": {\"features\": [remove_unneeded_properties(feature)]} } return json.dumps(payload)", "name, reasons=[]): if (settings.MAP_ROULETTE_API_KEY is not None and settings.MAP_ROULETTE_API_URL is not None): payload", "json from os.path import join import requests from django.conf import settings def remove_unneeded_properties(feature):", "feature['properties'].get('suspicions'): feature['properties'].pop('suspicions') return feature def format_challenge_task_payload(feature, challenge_id, name, reasons=[]): if len(reasons): feature['properties']['osmcha_reasons'] =", "return feature def format_challenge_task_payload(feature, challenge_id, name, reasons=[]): if len(reasons): feature['properties']['osmcha_reasons'] = \", \".join([i", "key.startswith('result:') ] for key in keys_to_remove: feature['properties'].pop(key) if feature['properties'].get('oldVersion'): feature['properties'].pop('oldVersion') if feature['properties'].get('suspicions'): feature['properties'].pop('suspicions')", "= { \"Content-Type\": \"application/json\", \"apiKey\": settings.MAP_ROULETTE_API_KEY } return requests.post( join(settings.MAP_ROULETTE_API_URL, 'task'), headers=headers, data=payload", "from os.path import join import requests from django.conf import settings def remove_unneeded_properties(feature): keys_to_remove", "import settings def remove_unneeded_properties(feature): keys_to_remove = [ key for key in feature['properties'].keys() if", "join import requests from django.conf import settings def remove_unneeded_properties(feature): keys_to_remove = [ key", "format_challenge_task_payload(feature, challenge_id, name, reasons=[]): if len(reasons): feature['properties']['osmcha_reasons'] = \", \".join([i for i in", "keys_to_remove: feature['properties'].pop(key) if 
feature['properties'].get('oldVersion'): feature['properties'].pop('oldVersion') if feature['properties'].get('suspicions'): feature['properties'].pop('suspicions') return feature def format_challenge_task_payload(feature, challenge_id,", "= \", \".join([i for i in reasons]) payload = { \"parent\": challenge_id, \"name\":", "feature, challenge_id, name, reasons ) headers = { \"Content-Type\": \"application/json\", \"apiKey\": settings.MAP_ROULETTE_API_KEY }", "django.conf import settings def remove_unneeded_properties(feature): keys_to_remove = [ key for key in feature['properties'].keys()", "[remove_unneeded_properties(feature)]} } return json.dumps(payload) def push_feature_to_maproulette(feature, challenge_id, name, reasons=[]): if (settings.MAP_ROULETTE_API_KEY is not", "keys_to_remove = [ key for key in feature['properties'].keys() if key.startswith('osm:') or key.startswith('result:') ]", "feature def format_challenge_task_payload(feature, challenge_id, name, reasons=[]): if len(reasons): feature['properties']['osmcha_reasons'] = \", \".join([i for", "push_feature_to_maproulette(feature, challenge_id, name, reasons=[]): if (settings.MAP_ROULETTE_API_KEY is not None and settings.MAP_ROULETTE_API_URL is not" ]
[ "json.loads(string) value = data['value'] known_primes = data['known_primes'] isPrime = is_prime(value, known_primes) #print('%d: %d',", "return parser.parse_args() def worker_routine(worker_url, control_url, context=None): \"\"\"Worker routine\"\"\" print('thread started') context = context", "worker_routine(worker_url, control_url, context=None): \"\"\"Worker routine\"\"\" print('thread started') context = context or zmq.Context.instance() w_socket", "#send reply back to client w_socket.send(b\"%d\"%isPrime) #w_socket.send(b'%d'%True) except zmq.Again as e: pass print('thread", "\"\"\"Worker routine\"\"\" print('thread started') context = context or zmq.Context.instance() w_socket = context.socket(zmq.REP) w_socket.connect(worker_url)", "except zmq.Again as e: pass print('thread terminated') #w_socket.close() #context.close() def main(num_threads=2, num_ceil=10, known_primes=[2,", "or zmq.Context.instance() w_socket = context.socket(zmq.REP) w_socket.connect(worker_url) c_sub = context.socket(zmq.SUB) c_sub.connect(control_url) c_sub.setsockopt(zmq.SUBSCRIBE, b\"S\") while", "threading.Thread(target=worker_routine, args=(worker_url, control_url, )) thread.start() print('Find primes') for i in range(3, num_ceil+1): data", "context.term() return known_primes if __name__ == '__main__': args = parse_args() known_primes = main(2,", "= json.loads(string) value = data['value'] known_primes = data['known_primes'] isPrime = is_prime(value, known_primes) #print('%d:", "def parse_args(): parser = argparse.ArgumentParser(description='Find all prime number in a range (from 2).')", "c_sub.recv_multipart(flags=zmq.NOBLOCK) print('==> %s, %s'%(address, stop_bit)) if int(stop_bit) == 1: break except zmq.Again as", "data = {'value': i, 'known_primes':known_primes} str_data = json.dumps(data) b_data = str_data.encode('ascii'); w_socket.send(b_data) y_n", "import time from libs.mylib import is_prime def parse_args(): parser = argparse.ArgumentParser(description='Find all prime", 
"import threading import json import time from libs.mylib import is_prime def parse_args(): parser", "w_socket.recv() if int(y_n) == 1: known_primes.append(i) print('Done finding') c_pub.send_multipart([b'S', b'1']) time.sleep(1) w_socket.close() c_pub.close()", "number in a range (from 2).') parser.add_argument('max', type=int, default=1000, help='from 2 to MAX')", "print('Done finding') c_pub.send_multipart([b'S', b'1']) time.sleep(1) w_socket.close() c_pub.close() context.term() return known_primes if __name__ ==", "= context or zmq.Context.instance() w_socket = context.socket(zmq.REP) w_socket.connect(worker_url) c_sub = context.socket(zmq.SUB) c_sub.connect(control_url) c_sub.setsockopt(zmq.SUBSCRIBE,", "json.dumps(data) b_data = str_data.encode('ascii'); w_socket.send(b_data) y_n = w_socket.recv() if int(y_n) == 1: known_primes.append(i)", "terminated') #w_socket.close() #context.close() def main(num_threads=2, num_ceil=10, known_primes=[2, ]): worker_url = \"inproc://workers\" control_url =", "print('thread started') context = context or zmq.Context.instance() w_socket = context.socket(zmq.REP) w_socket.connect(worker_url) c_sub =", "time.sleep(1) w_socket.close() c_pub.close() context.term() return known_primes if __name__ == '__main__': args = parse_args()", "value, isPrime) #send reply back to client w_socket.send(b\"%d\"%isPrime) #w_socket.send(b'%d'%True) except zmq.Again as e:", "= data['value'] known_primes = data['known_primes'] isPrime = is_prime(value, known_primes) #print('%d: %d', value, isPrime)", "#!/bin/env python3 import argparse import zmq import threading import json import time from", "print('==> %s, %s'%(address, stop_bit)) if int(stop_bit) == 1: break except zmq.Again as e:", "range (from 2).') parser.add_argument('max', type=int, default=1000, help='from 2 to MAX') return parser.parse_args() def", "value = data['value'] known_primes = data['known_primes'] isPrime = is_prime(value, known_primes) #print('%d: %d', value,", "= 
data['known_primes'] isPrime = is_prime(value, known_primes) #print('%d: %d', value, isPrime) #send reply back", "= w_socket.recv(flags=zmq.NOBLOCK) data = json.loads(string) value = data['value'] known_primes = data['known_primes'] isPrime =", "print('Find primes') for i in range(3, num_ceil+1): data = {'value': i, 'known_primes':known_primes} str_data", "default=1000, help='from 2 to MAX') return parser.parse_args() def worker_routine(worker_url, control_url, context=None): \"\"\"Worker routine\"\"\"", "def worker_routine(worker_url, control_url, context=None): \"\"\"Worker routine\"\"\" print('thread started') context = context or zmq.Context.instance()", "%d', value, isPrime) #send reply back to client w_socket.send(b\"%d\"%isPrime) #w_socket.send(b'%d'%True) except zmq.Again as", "w_socket.bind(worker_url) c_pub = context.socket(zmq.PUB) c_pub.bind(control_url) print('Start threads') for i in range(num_threads): thread =", "w_socket = context.socket(zmq.REQ) w_socket.bind(worker_url) c_pub = context.socket(zmq.PUB) c_pub.bind(control_url) print('Start threads') for i in", "import zmq import threading import json import time from libs.mylib import is_prime def", "for i in range(3, num_ceil+1): data = {'value': i, 'known_primes':known_primes} str_data = json.dumps(data)", "{'value': i, 'known_primes':known_primes} str_data = json.dumps(data) b_data = str_data.encode('ascii'); w_socket.send(b_data) y_n = w_socket.recv()", "i in range(3, num_ceil+1): data = {'value': i, 'known_primes':known_primes} str_data = json.dumps(data) b_data", "argparse.ArgumentParser(description='Find all prime number in a range (from 2).') parser.add_argument('max', type=int, default=1000, help='from", "i in range(num_threads): thread = threading.Thread(target=worker_routine, args=(worker_url, control_url, )) thread.start() print('Find primes') for", "while True: try: [address, stop_bit] = c_sub.recv_multipart(flags=zmq.NOBLOCK) print('==> %s, %s'%(address, stop_bit)) if int(stop_bit)", 
"parser.parse_args() def worker_routine(worker_url, control_url, context=None): \"\"\"Worker routine\"\"\" print('thread started') context = context or", "e: pass try: string = w_socket.recv(flags=zmq.NOBLOCK) data = json.loads(string) value = data['value'] known_primes", "1: known_primes.append(i) print('Done finding') c_pub.send_multipart([b'S', b'1']) time.sleep(1) w_socket.close() c_pub.close() context.term() return known_primes if", "isPrime = is_prime(value, known_primes) #print('%d: %d', value, isPrime) #send reply back to client", "def main(num_threads=2, num_ceil=10, known_primes=[2, ]): worker_url = \"inproc://workers\" control_url = \"inproc://control\" context =", "context = context or zmq.Context.instance() w_socket = context.socket(zmq.REP) w_socket.connect(worker_url) c_sub = context.socket(zmq.SUB) c_sub.connect(control_url)", "print('thread terminated') #w_socket.close() #context.close() def main(num_threads=2, num_ceil=10, known_primes=[2, ]): worker_url = \"inproc://workers\" control_url", "control_url, context=None): \"\"\"Worker routine\"\"\" print('thread started') context = context or zmq.Context.instance() w_socket =", "for i in range(num_threads): thread = threading.Thread(target=worker_routine, args=(worker_url, control_url, )) thread.start() print('Find primes')", "zmq.Again as e: pass try: string = w_socket.recv(flags=zmq.NOBLOCK) data = json.loads(string) value =", "threads') for i in range(num_threads): thread = threading.Thread(target=worker_routine, args=(worker_url, control_url, )) thread.start() print('Find", "thread.start() print('Find primes') for i in range(3, num_ceil+1): data = {'value': i, 'known_primes':known_primes}", "args=(worker_url, control_url, )) thread.start() print('Find primes') for i in range(3, num_ceil+1): data =", "c_pub.send_multipart([b'S', b'1']) time.sleep(1) w_socket.close() c_pub.close() context.term() return known_primes if __name__ == '__main__': args", "known_primes.append(i) print('Done finding') 
c_pub.send_multipart([b'S', b'1']) time.sleep(1) w_socket.close() c_pub.close() context.term() return known_primes if __name__", "#w_socket.send(b'%d'%True) except zmq.Again as e: pass print('thread terminated') #w_socket.close() #context.close() def main(num_threads=2, num_ceil=10,", "\"inproc://workers\" control_url = \"inproc://control\" context = zmq.Context.instance() w_socket = context.socket(zmq.REQ) w_socket.bind(worker_url) c_pub =", "except zmq.Again as e: pass try: string = w_socket.recv(flags=zmq.NOBLOCK) data = json.loads(string) value", "c_sub.connect(control_url) c_sub.setsockopt(zmq.SUBSCRIBE, b\"S\") while True: try: [address, stop_bit] = c_sub.recv_multipart(flags=zmq.NOBLOCK) print('==> %s, %s'%(address,", "range(3, num_ceil+1): data = {'value': i, 'known_primes':known_primes} str_data = json.dumps(data) b_data = str_data.encode('ascii');", "time from libs.mylib import is_prime def parse_args(): parser = argparse.ArgumentParser(description='Find all prime number", "w_socket.send(b\"%d\"%isPrime) #w_socket.send(b'%d'%True) except zmq.Again as e: pass print('thread terminated') #w_socket.close() #context.close() def main(num_threads=2,", "== 1: known_primes.append(i) print('Done finding') c_pub.send_multipart([b'S', b'1']) time.sleep(1) w_socket.close() c_pub.close() context.term() return known_primes", "if int(stop_bit) == 1: break except zmq.Again as e: pass try: string =", "data['known_primes'] isPrime = is_prime(value, known_primes) #print('%d: %d', value, isPrime) #send reply back to", "2 to MAX') return parser.parse_args() def worker_routine(worker_url, control_url, context=None): \"\"\"Worker routine\"\"\" print('thread started')", "context.socket(zmq.REQ) w_socket.bind(worker_url) c_pub = context.socket(zmq.PUB) c_pub.bind(control_url) print('Start threads') for i in range(num_threads): thread", "c_sub = context.socket(zmq.SUB) c_sub.connect(control_url) c_sub.setsockopt(zmq.SUBSCRIBE, b\"S\") while True: try: [address, stop_bit] = 
c_sub.recv_multipart(flags=zmq.NOBLOCK)", "print('Start threads') for i in range(num_threads): thread = threading.Thread(target=worker_routine, args=(worker_url, control_url, )) thread.start()", ")) thread.start() print('Find primes') for i in range(3, num_ceil+1): data = {'value': i,", "c_pub = context.socket(zmq.PUB) c_pub.bind(control_url) print('Start threads') for i in range(num_threads): thread = threading.Thread(target=worker_routine,", "context or zmq.Context.instance() w_socket = context.socket(zmq.REP) w_socket.connect(worker_url) c_sub = context.socket(zmq.SUB) c_sub.connect(control_url) c_sub.setsockopt(zmq.SUBSCRIBE, b\"S\")", "w_socket.recv(flags=zmq.NOBLOCK) data = json.loads(string) value = data['value'] known_primes = data['known_primes'] isPrime = is_prime(value,", "= threading.Thread(target=worker_routine, args=(worker_url, control_url, )) thread.start() print('Find primes') for i in range(3, num_ceil+1):", "try: string = w_socket.recv(flags=zmq.NOBLOCK) data = json.loads(string) value = data['value'] known_primes = data['known_primes']", "w_socket.connect(worker_url) c_sub = context.socket(zmq.SUB) c_sub.connect(control_url) c_sub.setsockopt(zmq.SUBSCRIBE, b\"S\") while True: try: [address, stop_bit] =", "= str_data.encode('ascii'); w_socket.send(b_data) y_n = w_socket.recv() if int(y_n) == 1: known_primes.append(i) print('Done finding')", "= c_sub.recv_multipart(flags=zmq.NOBLOCK) print('==> %s, %s'%(address, stop_bit)) if int(stop_bit) == 1: break except zmq.Again", "is_prime def parse_args(): parser = argparse.ArgumentParser(description='Find all prime number in a range (from", "= json.dumps(data) b_data = str_data.encode('ascii'); w_socket.send(b_data) y_n = w_socket.recv() if int(y_n) == 1:", "context=None): \"\"\"Worker routine\"\"\" print('thread started') context = context or zmq.Context.instance() w_socket = context.socket(zmq.REP)", "in a range (from 2).') parser.add_argument('max', type=int, default=1000, help='from 2 to MAX') 
return", "]): worker_url = \"inproc://workers\" control_url = \"inproc://control\" context = zmq.Context.instance() w_socket = context.socket(zmq.REQ)", "e: pass print('thread terminated') #w_socket.close() #context.close() def main(num_threads=2, num_ceil=10, known_primes=[2, ]): worker_url =", "\"inproc://control\" context = zmq.Context.instance() w_socket = context.socket(zmq.REQ) w_socket.bind(worker_url) c_pub = context.socket(zmq.PUB) c_pub.bind(control_url) print('Start", "= w_socket.recv() if int(y_n) == 1: known_primes.append(i) print('Done finding') c_pub.send_multipart([b'S', b'1']) time.sleep(1) w_socket.close()", "main(num_threads=2, num_ceil=10, known_primes=[2, ]): worker_url = \"inproc://workers\" control_url = \"inproc://control\" context = zmq.Context.instance()", "control_url, )) thread.start() print('Find primes') for i in range(3, num_ceil+1): data = {'value':", "#context.close() def main(num_threads=2, num_ceil=10, known_primes=[2, ]): worker_url = \"inproc://workers\" control_url = \"inproc://control\" context", "json import time from libs.mylib import is_prime def parse_args(): parser = argparse.ArgumentParser(description='Find all", "a range (from 2).') parser.add_argument('max', type=int, default=1000, help='from 2 to MAX') return parser.parse_args()", "context = zmq.Context.instance() w_socket = context.socket(zmq.REQ) w_socket.bind(worker_url) c_pub = context.socket(zmq.PUB) c_pub.bind(control_url) print('Start threads')", "w_socket = context.socket(zmq.REP) w_socket.connect(worker_url) c_sub = context.socket(zmq.SUB) c_sub.connect(control_url) c_sub.setsockopt(zmq.SUBSCRIBE, b\"S\") while True: try:", "help='from 2 to MAX') return parser.parse_args() def worker_routine(worker_url, control_url, context=None): \"\"\"Worker routine\"\"\" print('thread", "== 1: break except zmq.Again as e: pass try: string = w_socket.recv(flags=zmq.NOBLOCK) data", "int(stop_bit) == 1: break except zmq.Again as e: pass try: string = 
w_socket.recv(flags=zmq.NOBLOCK)", "known_primes = data['known_primes'] isPrime = is_prime(value, known_primes) #print('%d: %d', value, isPrime) #send reply", "range(num_threads): thread = threading.Thread(target=worker_routine, args=(worker_url, control_url, )) thread.start() print('Find primes') for i in", "known_primes if __name__ == '__main__': args = parse_args() known_primes = main(2, args.max) print(known_primes)", "zmq import threading import json import time from libs.mylib import is_prime def parse_args():", "context.socket(zmq.SUB) c_sub.connect(control_url) c_sub.setsockopt(zmq.SUBSCRIBE, b\"S\") while True: try: [address, stop_bit] = c_sub.recv_multipart(flags=zmq.NOBLOCK) print('==> %s,", "as e: pass try: string = w_socket.recv(flags=zmq.NOBLOCK) data = json.loads(string) value = data['value']", "string = w_socket.recv(flags=zmq.NOBLOCK) data = json.loads(string) value = data['value'] known_primes = data['known_primes'] isPrime", "[address, stop_bit] = c_sub.recv_multipart(flags=zmq.NOBLOCK) print('==> %s, %s'%(address, stop_bit)) if int(stop_bit) == 1: break", "str_data.encode('ascii'); w_socket.send(b_data) y_n = w_socket.recv() if int(y_n) == 1: known_primes.append(i) print('Done finding') c_pub.send_multipart([b'S',", "back to client w_socket.send(b\"%d\"%isPrime) #w_socket.send(b'%d'%True) except zmq.Again as e: pass print('thread terminated') #w_socket.close()", "in range(num_threads): thread = threading.Thread(target=worker_routine, args=(worker_url, control_url, )) thread.start() print('Find primes') for i", "import argparse import zmq import threading import json import time from libs.mylib import", "parse_args(): parser = argparse.ArgumentParser(description='Find all prime number in a range (from 2).') parser.add_argument('max',", "context.socket(zmq.REP) w_socket.connect(worker_url) c_sub = context.socket(zmq.SUB) c_sub.connect(control_url) c_sub.setsockopt(zmq.SUBSCRIBE, b\"S\") while True: try: [address, stop_bit]", "b'1']) time.sleep(1) 
w_socket.close() c_pub.close() context.term() return known_primes if __name__ == '__main__': args =", "import json import time from libs.mylib import is_prime def parse_args(): parser = argparse.ArgumentParser(description='Find", "to MAX') return parser.parse_args() def worker_routine(worker_url, control_url, context=None): \"\"\"Worker routine\"\"\" print('thread started') context", "= context.socket(zmq.PUB) c_pub.bind(control_url) print('Start threads') for i in range(num_threads): thread = threading.Thread(target=worker_routine, args=(worker_url,", "= \"inproc://control\" context = zmq.Context.instance() w_socket = context.socket(zmq.REQ) w_socket.bind(worker_url) c_pub = context.socket(zmq.PUB) c_pub.bind(control_url)", "from libs.mylib import is_prime def parse_args(): parser = argparse.ArgumentParser(description='Find all prime number in", "i, 'known_primes':known_primes} str_data = json.dumps(data) b_data = str_data.encode('ascii'); w_socket.send(b_data) y_n = w_socket.recv() if", "str_data = json.dumps(data) b_data = str_data.encode('ascii'); w_socket.send(b_data) y_n = w_socket.recv() if int(y_n) ==", "client w_socket.send(b\"%d\"%isPrime) #w_socket.send(b'%d'%True) except zmq.Again as e: pass print('thread terminated') #w_socket.close() #context.close() def", "parser.add_argument('max', type=int, default=1000, help='from 2 to MAX') return parser.parse_args() def worker_routine(worker_url, control_url, context=None):", "known_primes) #print('%d: %d', value, isPrime) #send reply back to client w_socket.send(b\"%d\"%isPrime) #w_socket.send(b'%d'%True) except", "worker_url = \"inproc://workers\" control_url = \"inproc://control\" context = zmq.Context.instance() w_socket = context.socket(zmq.REQ) w_socket.bind(worker_url)", "primes') for i in range(3, num_ceil+1): data = {'value': i, 'known_primes':known_primes} str_data =", "num_ceil=10, known_primes=[2, ]): worker_url = \"inproc://workers\" control_url = \"inproc://control\" context = 
zmq.Context.instance() w_socket", "#print('%d: %d', value, isPrime) #send reply back to client w_socket.send(b\"%d\"%isPrime) #w_socket.send(b'%d'%True) except zmq.Again", "= zmq.Context.instance() w_socket = context.socket(zmq.REQ) w_socket.bind(worker_url) c_pub = context.socket(zmq.PUB) c_pub.bind(control_url) print('Start threads') for", "(from 2).') parser.add_argument('max', type=int, default=1000, help='from 2 to MAX') return parser.parse_args() def worker_routine(worker_url,", "finding') c_pub.send_multipart([b'S', b'1']) time.sleep(1) w_socket.close() c_pub.close() context.term() return known_primes if __name__ == '__main__':", "context.socket(zmq.PUB) c_pub.bind(control_url) print('Start threads') for i in range(num_threads): thread = threading.Thread(target=worker_routine, args=(worker_url, control_url,", "c_pub.bind(control_url) print('Start threads') for i in range(num_threads): thread = threading.Thread(target=worker_routine, args=(worker_url, control_url, ))", "known_primes=[2, ]): worker_url = \"inproc://workers\" control_url = \"inproc://control\" context = zmq.Context.instance() w_socket =", "prime number in a range (from 2).') parser.add_argument('max', type=int, default=1000, help='from 2 to", "stop_bit] = c_sub.recv_multipart(flags=zmq.NOBLOCK) print('==> %s, %s'%(address, stop_bit)) if int(stop_bit) == 1: break except", "zmq.Context.instance() w_socket = context.socket(zmq.REP) w_socket.connect(worker_url) c_sub = context.socket(zmq.SUB) c_sub.connect(control_url) c_sub.setsockopt(zmq.SUBSCRIBE, b\"S\") while True:", "data = json.loads(string) value = data['value'] known_primes = data['known_primes'] isPrime = is_prime(value, known_primes)", "routine\"\"\" print('thread started') context = context or zmq.Context.instance() w_socket = context.socket(zmq.REP) w_socket.connect(worker_url) c_sub", "'known_primes':known_primes} str_data = json.dumps(data) b_data = str_data.encode('ascii'); w_socket.send(b_data) y_n = w_socket.recv() if int(y_n)", 
"try: [address, stop_bit] = c_sub.recv_multipart(flags=zmq.NOBLOCK) print('==> %s, %s'%(address, stop_bit)) if int(stop_bit) == 1:", "c_sub.setsockopt(zmq.SUBSCRIBE, b\"S\") while True: try: [address, stop_bit] = c_sub.recv_multipart(flags=zmq.NOBLOCK) print('==> %s, %s'%(address, stop_bit))", "%s'%(address, stop_bit)) if int(stop_bit) == 1: break except zmq.Again as e: pass try:", "pass print('thread terminated') #w_socket.close() #context.close() def main(num_threads=2, num_ceil=10, known_primes=[2, ]): worker_url = \"inproc://workers\"", "threading import json import time from libs.mylib import is_prime def parse_args(): parser =", "MAX') return parser.parse_args() def worker_routine(worker_url, control_url, context=None): \"\"\"Worker routine\"\"\" print('thread started') context =", "y_n = w_socket.recv() if int(y_n) == 1: known_primes.append(i) print('Done finding') c_pub.send_multipart([b'S', b'1']) time.sleep(1)", "c_pub.close() context.term() return known_primes if __name__ == '__main__': args = parse_args() known_primes =", "= context.socket(zmq.SUB) c_sub.connect(control_url) c_sub.setsockopt(zmq.SUBSCRIBE, b\"S\") while True: try: [address, stop_bit] = c_sub.recv_multipart(flags=zmq.NOBLOCK) print('==>", "#w_socket.close() #context.close() def main(num_threads=2, num_ceil=10, known_primes=[2, ]): worker_url = \"inproc://workers\" control_url = \"inproc://control\"", "thread = threading.Thread(target=worker_routine, args=(worker_url, control_url, )) thread.start() print('Find primes') for i in range(3,", "= argparse.ArgumentParser(description='Find all prime number in a range (from 2).') parser.add_argument('max', type=int, default=1000,", "argparse import zmq import threading import json import time from libs.mylib import is_prime", "= context.socket(zmq.REP) w_socket.connect(worker_url) c_sub = context.socket(zmq.SUB) c_sub.connect(control_url) c_sub.setsockopt(zmq.SUBSCRIBE, b\"S\") while True: try: [address,", "data['value'] known_primes = 
data['known_primes'] isPrime = is_prime(value, known_primes) #print('%d: %d', value, isPrime) #send", "= {'value': i, 'known_primes':known_primes} str_data = json.dumps(data) b_data = str_data.encode('ascii'); w_socket.send(b_data) y_n =", "isPrime) #send reply back to client w_socket.send(b\"%d\"%isPrime) #w_socket.send(b'%d'%True) except zmq.Again as e: pass", "%s, %s'%(address, stop_bit)) if int(stop_bit) == 1: break except zmq.Again as e: pass", "in range(3, num_ceil+1): data = {'value': i, 'known_primes':known_primes} str_data = json.dumps(data) b_data =", "if int(y_n) == 1: known_primes.append(i) print('Done finding') c_pub.send_multipart([b'S', b'1']) time.sleep(1) w_socket.close() c_pub.close() context.term()", "reply back to client w_socket.send(b\"%d\"%isPrime) #w_socket.send(b'%d'%True) except zmq.Again as e: pass print('thread terminated')", "w_socket.close() c_pub.close() context.term() return known_primes if __name__ == '__main__': args = parse_args() known_primes", "type=int, default=1000, help='from 2 to MAX') return parser.parse_args() def worker_routine(worker_url, control_url, context=None): \"\"\"Worker", "libs.mylib import is_prime def parse_args(): parser = argparse.ArgumentParser(description='Find all prime number in a", "pass try: string = w_socket.recv(flags=zmq.NOBLOCK) data = json.loads(string) value = data['value'] known_primes =", "True: try: [address, stop_bit] = c_sub.recv_multipart(flags=zmq.NOBLOCK) print('==> %s, %s'%(address, stop_bit)) if int(stop_bit) ==", "parser = argparse.ArgumentParser(description='Find all prime number in a range (from 2).') parser.add_argument('max', type=int,", "= is_prime(value, known_primes) #print('%d: %d', value, isPrime) #send reply back to client w_socket.send(b\"%d\"%isPrime)", "to client w_socket.send(b\"%d\"%isPrime) #w_socket.send(b'%d'%True) except zmq.Again as e: pass print('thread terminated') #w_socket.close() #context.close()", "return known_primes if __name__ == '__main__': args = 
parse_args() known_primes = main(2, args.max)", "zmq.Context.instance() w_socket = context.socket(zmq.REQ) w_socket.bind(worker_url) c_pub = context.socket(zmq.PUB) c_pub.bind(control_url) print('Start threads') for i", "all prime number in a range (from 2).') parser.add_argument('max', type=int, default=1000, help='from 2", "int(y_n) == 1: known_primes.append(i) print('Done finding') c_pub.send_multipart([b'S', b'1']) time.sleep(1) w_socket.close() c_pub.close() context.term() return", "= \"inproc://workers\" control_url = \"inproc://control\" context = zmq.Context.instance() w_socket = context.socket(zmq.REQ) w_socket.bind(worker_url) c_pub", "1: break except zmq.Again as e: pass try: string = w_socket.recv(flags=zmq.NOBLOCK) data =", "break except zmq.Again as e: pass try: string = w_socket.recv(flags=zmq.NOBLOCK) data = json.loads(string)", "zmq.Again as e: pass print('thread terminated') #w_socket.close() #context.close() def main(num_threads=2, num_ceil=10, known_primes=[2, ]):", "python3 import argparse import zmq import threading import json import time from libs.mylib", "= context.socket(zmq.REQ) w_socket.bind(worker_url) c_pub = context.socket(zmq.PUB) c_pub.bind(control_url) print('Start threads') for i in range(num_threads):", "control_url = \"inproc://control\" context = zmq.Context.instance() w_socket = context.socket(zmq.REQ) w_socket.bind(worker_url) c_pub = context.socket(zmq.PUB)", "as e: pass print('thread terminated') #w_socket.close() #context.close() def main(num_threads=2, num_ceil=10, known_primes=[2, ]): worker_url", "w_socket.send(b_data) y_n = w_socket.recv() if int(y_n) == 1: known_primes.append(i) print('Done finding') c_pub.send_multipart([b'S', b'1'])", "b_data = str_data.encode('ascii'); w_socket.send(b_data) y_n = w_socket.recv() if int(y_n) == 1: known_primes.append(i) print('Done", "is_prime(value, known_primes) #print('%d: %d', value, isPrime) #send reply back to client w_socket.send(b\"%d\"%isPrime) #w_socket.send(b'%d'%True)", 
"2).') parser.add_argument('max', type=int, default=1000, help='from 2 to MAX') return parser.parse_args() def worker_routine(worker_url, control_url,", "b\"S\") while True: try: [address, stop_bit] = c_sub.recv_multipart(flags=zmq.NOBLOCK) print('==> %s, %s'%(address, stop_bit)) if", "started') context = context or zmq.Context.instance() w_socket = context.socket(zmq.REP) w_socket.connect(worker_url) c_sub = context.socket(zmq.SUB)", "num_ceil+1): data = {'value': i, 'known_primes':known_primes} str_data = json.dumps(data) b_data = str_data.encode('ascii'); w_socket.send(b_data)", "import is_prime def parse_args(): parser = argparse.ArgumentParser(description='Find all prime number in a range", "stop_bit)) if int(stop_bit) == 1: break except zmq.Again as e: pass try: string" ]
[ "wikipedia --model prone --seed 0 1 2 3 4 --hidden-size 2\") pass if", "unsupervised_node_classification --dataset wikipedia --model prone --seed 0 1 2 3 4 --hidden-size 2\")", "def test_prone(): os.system(\"python ../scripts/train.py --task unsupervised_node_classification --dataset wikipedia --model prone --seed 0 1", "prone --seed 0 1 2 3 4 --hidden-size 2\") pass if __name__ ==", "os import sys sys.path.append('../') def test_prone(): os.system(\"python ../scripts/train.py --task unsupervised_node_classification --dataset wikipedia --model", "--task unsupervised_node_classification --dataset wikipedia --model prone --seed 0 1 2 3 4 --hidden-size", "0 1 2 3 4 --hidden-size 2\") pass if __name__ == \"__main__\": test_prone()", "<gh_stars>10-100 import os import sys sys.path.append('../') def test_prone(): os.system(\"python ../scripts/train.py --task unsupervised_node_classification --dataset", "os.system(\"python ../scripts/train.py --task unsupervised_node_classification --dataset wikipedia --model prone --seed 0 1 2 3", "--dataset wikipedia --model prone --seed 0 1 2 3 4 --hidden-size 2\") pass", "--seed 0 1 2 3 4 --hidden-size 2\") pass if __name__ == \"__main__\":", "sys.path.append('../') def test_prone(): os.system(\"python ../scripts/train.py --task unsupervised_node_classification --dataset wikipedia --model prone --seed 0", "test_prone(): os.system(\"python ../scripts/train.py --task unsupervised_node_classification --dataset wikipedia --model prone --seed 0 1 2", "--model prone --seed 0 1 2 3 4 --hidden-size 2\") pass if __name__", "../scripts/train.py --task unsupervised_node_classification --dataset wikipedia --model prone --seed 0 1 2 3 4", "sys sys.path.append('../') def test_prone(): os.system(\"python ../scripts/train.py --task unsupervised_node_classification --dataset wikipedia --model prone --seed", "import sys sys.path.append('../') def test_prone(): os.system(\"python ../scripts/train.py --task unsupervised_node_classification --dataset 
wikipedia --model prone", "import os import sys sys.path.append('../') def test_prone(): os.system(\"python ../scripts/train.py --task unsupervised_node_classification --dataset wikipedia" ]
[ "f'{testflows} {metadata[\"version\"]}||\\n' ) return s + \"\\n\" def format_summary(self, data): counts = data[\"counts\"]", "Unless required by applicable law or agreed to in writing, software # distributed", "= self.metadata(results) d[\"counts\"] = self.counts(d[\"requirements\"]) d[\"company\"] = self.company(args) counts = d[\"counts\"] return d", "\"\\n\" return s + \"\\n\" def format_title(self, data): if data[\"title\"]: return \"<br>\" +", "\"untested\"], default=[\"satisfied\", \"unsatisfied\", \"untested\"]) parser.add_argument(\"--input-link\", metavar=\"attribute\", help=\"attribute that is used as a link", "{color} smaller-title\">' f'<span>{value}%</span>' f'<span class=\"title\">{title}</span>' '<div class=\"slice\">' '<div class=\"bar\"></div>' '<div class=\"fill\"></div>' '</div>' '</div>\\n')", "test[\"messages\"] = \"\".join(messages) return test def add_tests(self, requirements, results): tests = list(results[\"tests\"].values()) for", "if not isinstance(value, Requirement): continue _requirements[value.name] = {\"requirement\": value, \"tests\": []} return (_specs,", "getattr(TestType, test[\"test\"][\"test_type\"]) > TestType.Test: for t in tests[idx + 1:]: flags = Flags(t[\"test\"][\"test_flags\"])", "spec_names, path, results): _requirements = {} _specs = [] if path == \"-\":", "= data[\"requirements\"] s = \"\\n\\n## Coverage\\n\" for r in reqs.values(): s += f'\\n<section", "list(results[\"tests\"].values()) for i, test in enumerate(tests): flags = Flags(test[\"test\"][\"test_flags\"]) if flags & SKIP", "def format_copyright(self, data): if not data[\"company\"].get(\"name\"): return \"\" return (f'\\n<p class=\"copyright\">\\n' f'{copyright(data[\"company\"][\"name\"])}\\n' \"</p>\\n\")", "body += self.format_summary(data) body += self.format_statistics(data) body += self.format_table(data) return template.strip() % {", "* 100:.0f}\", \"Satisfied\", \"green\") if counts.unsatisfied > 0: s += template(f\"{counts.unsatisfied / 
float(counts.units)", "specifications for which to generate coverage report\" \", default: include all specifications. Only", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "= data[\"metadata\"] s = ( \"\\n\\n\" f\"||**Date**||{localfromtimestamp(metadata['date']):%b %d, %Y %-H:%M}||\\n\" f'||**Framework**||' f'{testflows} {metadata[\"version\"]}||\\n'", "{\"data\": data} + \"</p>\\n\" def format_confidential(self, data): if not data[\"company\"].get(\"confidential\"): return \"\" return", "requirements = self.requirements(args.only, source, results) # if custom title was not specified generate", "satisfied self.unsatisfied = unsatisfied self.untested = untested def __bool__(self): return self.units > 0", "_requirements[req[\"name\"]] = {\"requirement\": Requirement(**req), \"tests\": []} else: spec = importlib.util.spec_from_file_location(\"requirements\", path) module =", "break if getattr(TestType, t[\"test\"][\"test_type\"]) >= TestType.Test \\ and t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\", tests_by_parent, tests_by_id,", "@classmethod def add_command(cls, commands): parser = commands.add_parser(\"coverage\", help=\"requirements coverage report\", epilog=epilog(), description=\"Generate requirements", "\"\\n\" def format_summary(self, data): counts = data[\"counts\"] def template(value, title, color): return (", "+= f'\\n<div markdown=\"1\" class=\"requirement-description hidden\">\\n{description}\\n</div>' for test in r[\"tests\"]: result = test[\"result\"] cls", "s += f'\\n<div class=\"test\"><span class=\"result result-inline result-{cls}\">{result[\"result_type\"]}</span><span class=\"time time-inline\">{strftimedelta(result[\"message_rtime\"])}</span>{test[\"test\"][\"test_name\"]}</div>' s += f'\\n<div class=\"test-procedure", "counts def company(self, args): d = {} if args.copyright: d[\"name\"] = args.copyright if", "is False: continue result = 
test[\"result\"] for requirement in test[\"test\"][\"requirements\"]: if requirement[\"requirement_name\"] in", "def format_table(self, data): reqs = data[\"requirements\"] s = \"\\n\\n## Coverage\\n\" for r in", "metavar=\"name\", type=str, default=[], nargs=\"+\", help=(\"name of one or more specifications for which to", "d[\"title\"] = title d[\"requirements\"] = self.add_tests(requirements, results) d[\"metadata\"] = self.metadata(results) d[\"counts\"] = self.counts(d[\"requirements\"])", "+ \"||\\n\" return s + \"\\n\" def format_table(self, data): reqs = data[\"requirements\"] s", "datetime from functools import partial import testflows.settings as settings import testflows._core.cli.arg.type as argtype", "nargs=\"+\", help=\"verification status. Choices: 'satisfied', 'unsatisfied', 'untested'\", choices=[\"satisfied\", \"unsatisfied\", \"untested\"], default=[\"satisfied\", \"unsatisfied\", \"untested\"])", "i, tests, results[\"tests_by_parent\"], results[\"tests_by_id\"])) return requirements def counts(self, requirements): counts = Counts(\"requirements\", *([0]", "\"untested\"]) parser.add_argument(\"--input-link\", metavar=\"attribute\", help=\"attribute that is used as a link to the input", "include all specification names title = args.title if title is None and specs:", "data): if not data[\"company\"].get(\"confidential\"): return \"\" return f'\\n<p class=\"confidential\">Document status - Confidential</p>\\n' def", "\"\" data = base64.b64encode(data[\"company\"][\"logo\"]).decode(\"utf-8\") return '\\n<p>' + logo % {\"data\": data} + \"</p>\\n\"", "def __init__(self, name, units, satisfied, unsatisfied, untested): self.name = name self.units = units", "{ \"date\": time.time(), \"version\": __version__, } def requirements(self, spec_names, path, results): _requirements =", "r[\"requirement\"].description.replace(\"\\\\n\",\"\\n\") if description: s += f'\\n<div markdown=\"1\" class=\"requirement-description hidden\">\\n{description}\\n</div>' for 
test in r[\"tests\"]:", "time.time(), \"version\": __version__, } def requirements(self, spec_names, path, results): _requirements = {} _specs", "SKIP and settings.show_skipped is False: continue if t[\"test\"][\"message_time\"] > ended: break if t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]):", "float(counts.units) * 100:.0f}\", \"Satisfied\", \"green\") if counts.unsatisfied > 0: s += template(f\"{counts.unsatisfied /", "float(counts.units) * 100:.0f}\", \"Unsatisfied\", \"red\") if counts.untested > 0: s += template(f\"{counts.untested /", "ResultsLogPipeline from testflows._core.transform.log.short import format_test, format_result from testflows._core.utils.timefuncs import localfromtimestamp, strftimedelta from testflows._core.utils.string", "{ \"OK\": \"Satisfied\", \"Fail\": \"Unsatisfied\", \"Error\": \"Untested\" } s = \"\\n\\n## Statistics\\n\" s", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "\"No tests\" else: s += '<div class=\"chart\">' if counts.satisfied > 0: s +=", "*([0] * 4)) for req in requirements.values(): counts.units += 1 tests = req[\"tests\"]", "s += template(f\"{counts.satisfied / float(counts.units) * 100:.0f}\", \"Satisfied\", \"green\") if counts.unsatisfied > 0:", "Software Testing Framework (http://testflows.com) # # Licensed under the Apache License, Version 2.0", "f'{copyright(data[\"company\"][\"name\"])}\\n' \"</p>\\n\") def format_metadata(self, data): metadata = data[\"metadata\"] s = ( \"\\n\\n\" f\"||**Date**||{localfromtimestamp(metadata['date']):%b", "class Formatter: utf_icons = { \"satisfied\": \"\\u2714\", \"unsatisfied\": \"\\u2718\", \"untested\": \"\\u270E\" } icon_colors", "flags = Flags(test[\"test\"][\"test_flags\"]) if flags & SKIP and settings.show_skipped is False: continue result", "+ 1:]: flags = Flags(t[\"test\"][\"test_flags\"]) if flags & SKIP and settings.show_skipped is False:", "in result_map.items()] ) + \"||\\n\" s += \"||\" + 
\"||\".join([f\"<center>{i}</center>\" for i in", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "result[\"result_type\"] != \"OK\": satisfied = False if satisfied: counts.satisfied += 1 req[\"status\"] =", "if title is None and specs: title = \"<br>\".join([spec[\"specification_name\"] for spec in specs])", "class=\"c100 p{value} {color} smaller-title\">' f'<span>{value}%</span>' f'<span class=\"title\">{title}</span>' '<div class=\"slice\">' '<div class=\"bar\"></div>' '<div class=\"fill\"></div>'", "TestType from testflows._core.cli.arg.common import epilog from testflows._core.cli.arg.common import HelpFormatter from testflows._core.cli.arg.handlers.handler import Handler", "tests\\n</div>' s += \"\\n\" return s + \"\\n\" def format_title(self, data): if data[\"title\"]:", "TestType.Test \\ and t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True)) else: for", "language governing permissions and # limitations under the License. 
import os import sys", "custom title was not specified generate a title # that include all specification", "\"color-error\" } def format_logo(self, data): if not data[\"company\"].get(\"logo\"): return \"\" data = base64.b64encode(data[\"company\"][\"logo\"]).decode(\"utf-8\")", "import TestType from testflows._core.cli.arg.common import epilog from testflows._core.cli.arg.common import HelpFormatter from testflows._core.cli.arg.handlers.handler import", "Open-Source Test Framework [<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]: https://testflows.com [ClickHouse]: https://clickhouse.yandex <script> %(script)s </script> \"\"\"", "continue result = test[\"result\"] for requirement in test[\"test\"][\"requirements\"]: if requirement[\"requirement_name\"] in requirements: requirements[requirement[\"requirement_name\"]][\"tests\"].append(self.add_test_messages(test,", "test procedure on click document.querySelectorAll('.test').forEach( function(item){ item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show'); item.classList.toggle('active'); }); }); }", "def metadata(self, results): return { \"date\": time.time(), \"version\": __version__, } def requirements(self, spec_names,", "} icon_colors = { \"satisfied\": \"color-ok\", \"unsatisfied\": \"color-fail\", \"untested\": \"color-error\" } def format_logo(self,", "False: continue if t[\"test\"][\"message_time\"] > ended: break if getattr(TestType, t[\"test\"][\"test_type\"]) >= TestType.Test \\", "def table(self, results): table = { \"header\": [\"Requirement\", \"Tests\"], \"rows\": [], } return", "\"OK\": satisfied = False if satisfied: counts.satisfied += 1 req[\"status\"] = \"satisfied\" else:", "& SKIP and settings.show_skipped is False: continue result = test[\"result\"] for requirement in", "if not data[\"company\"].get(\"name\"): return \"\" return (f'\\n<p class=\"copyright\">\\n' f'{copyright(data[\"company\"][\"name\"])}\\n' \"</p>\\n\") def 
format_metadata(self, data):", "} class Counts(object): def __init__(self, name, units, satisfied, unsatisfied, untested): self.name = name", "result = test[\"result\"] if result[\"result_type\"] != \"OK\": satisfied = False if satisfied: counts.satisfied", "import title as make_title from testflows._core.transform.log.report.totals import Counts from testflows._core.objects import Requirement logo", "settings.show_skipped is False: continue if t[\"test\"][\"message_time\"] > ended: break if getattr(TestType, t[\"test\"][\"test_type\"]) >=", "default=[\"satisfied\", \"unsatisfied\", \"untested\"]) parser.add_argument(\"--input-link\", metavar=\"attribute\", help=\"attribute that is used as a link to", "{ \"satisfied\": \"color-ok\", \"unsatisfied\": \"color-fail\", \"untested\": \"color-error\" } def format_logo(self, data): if not", "description=\"Generate requirements coverage report.\", formatter_class=HelpFormatter) parser.add_argument(\"requirements\", metavar=\"requirements\", type=partial(argtype.path, special=[\"-\"]), help=\"requirements source file, default:", "\"satisfied\": \"color-ok\", \"unsatisfied\": \"color-fail\", \"untested\": \"color-error\" } def format_logo(self, data): if not data[\"company\"].get(\"logo\"):", "notice\", type=str) parser.add_argument(\"--confidential\", help=\"mark as confidential\", action=\"store_true\") parser.add_argument(\"--logo\", metavar=\"path\", type=argtype.file(\"rb\"), help='use logo image", "messages.append(format_result(t[\"result\"], no_colors=True)) messages.append(format_result(test[\"result\"], no_colors=True)) test[\"messages\"] = \"\".join(messages) return test def add_tests(self, requirements, results):", "+ \"\\n\" def format_title(self, data): if data[\"title\"]: return \"<br>\" + make_title(data[\"title\"]) return \"\"", "class=\"test-procedure hidden\">\\n```testflows\\n{test[\"messages\"]}\\n```\\n</div>' if not r[\"tests\"]: s += f'\\n<div class=\"no-tests\">\\n<span 
class=\"result-inline\">\\u270E</span>\\nNo tests\\n</div>' s +=", "result, name, default=None): tests = list(result[\"tests\"].values()) if not tests: return default test =", "metadata = data[\"metadata\"] s = ( \"\\n\\n\" f\"||**Date**||{localfromtimestamp(metadata['date']):%b %d, %Y %-H:%M}||\\n\" f'||**Framework**||' f'{testflows}", "epilog=epilog(), description=\"Generate requirements coverage report.\", formatter_class=HelpFormatter) parser.add_argument(\"requirements\", metavar=\"requirements\", type=partial(argtype.path, special=[\"-\"]), help=\"requirements source file,", "\"rows\": [], } return table def metadata(self, results): return { \"date\": time.time(), \"version\":", "__bool__(self): return self.units > 0 class Handler(HandlerBase): @classmethod def add_command(cls, commands): parser =", "data[\"metadata\"] s = ( \"\\n\\n\" f\"||**Date**||{localfromtimestamp(metadata['date']):%b %d, %Y %-H:%M}||\\n\" f'||**Framework**||' f'{testflows} {metadata[\"version\"]}||\\n' )", "self.units = units self.satisfied = satisfied self.unsatisfied = unsatisfied self.untested = untested def", "result[\"result_type\"].lower() s += f'\\n<div class=\"test\"><span class=\"result result-inline result-{cls}\">{result[\"result_type\"]}</span><span class=\"time time-inline\">{strftimedelta(result[\"message_rtime\"])}</span>{test[\"test\"][\"test_name\"]}</div>' s += f'\\n<div", "if not matched: continue _specs.append(spec) for req in spec[\"specification_requirements\"]: _requirements[req[\"name\"]] = {\"requirement\": Requirement(**req),", "not use this file except in compliance with the License. 
# You may", "= True for test in tests: result = test[\"result\"] if result[\"result_type\"] != \"OK\":", "def company(self, args): d = {} if args.copyright: d[\"name\"] = args.copyright if args.confidential:", "units, satisfied, unsatisfied, untested): self.name = name self.units = units self.satisfied = satisfied", "for spec in results[\"specifications\"]: if spec_names: matched = False for name in spec_names:", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "+= 1 req[\"status\"] = \"untested\" else: satisfied = True for test in tests:", "\"Error\": \"Untested\" } s = \"\\n\\n## Statistics\\n\" s += \"||\" + \"||\".join( [\"<span></span>\",", "and settings.show_skipped is False: continue if t[\"test\"][\"message_time\"] > ended: break if t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"],", "def counts(self, requirements): counts = Counts(\"requirements\", *([0] * 4)) for req in requirements.values():", "(from input log)\", nargs=\"?\", default=\"-\") parser.add_argument(\"input\", metavar=\"input\", type=argtype.logfile(\"r\", bufsize=1, encoding=\"utf-8\"), nargs=\"?\", help=\"input log,", "from testflows._core.testtype import TestType from testflows._core.cli.arg.common import epilog from testflows._core.cli.arg.common import HelpFormatter from", "function(item){ item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show'); item.children[0].classList.toggle('active'); }); }); // Toggle test procedure on click", "agreed to in writing, software # distributed under the License is distributed on", "reqs = data[\"requirements\"] s = \"\\n\\n## Coverage\\n\" for r in reqs.values(): s +=", "name, units, satisfied, unsatisfied, untested): self.name = name self.units = units self.satisfied =", "req[\"tests\"] if not tests: counts.untested += 1 req[\"status\"] = \"untested\" else: satisfied =", "counts.untested > 0: s += template(f\"{counts.untested / 
float(counts.units) * 100:.0f}\", \"Untested\", \"orange\") s", "%(script)s </script> \"\"\" script = \"\"\" window.onload = function(){ // Toggle requirement description", "no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True)) messages.append(format_result(test[\"result\"], no_colors=True)) test[\"messages\"] = \"\".join(messages) return test def add_tests(self, requirements,", "if path == \"-\": for spec in results[\"specifications\"]: if spec_names: matched = False", "and t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True)) else: for t in", "str(counts.units), str(counts.satisfied), str(counts.unsatisfied), str(counts.untested)]]) + \"||\\n\" return s + \"\\n\" def format_table(self, data):", "ended: break if t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True)) messages.append(format_result(test[\"result\"], no_colors=True))", "= data[\"counts\"] result_map = { \"OK\": \"Satisfied\", \"Fail\": \"Unsatisfied\", \"Error\": \"Untested\" } s", "\"\"\" script = \"\"\" window.onload = function(){ // Toggle requirement description on click", "f'\\n<div class=\"test\"><span class=\"result result-inline result-{cls}\">{result[\"result_type\"]}</span><span class=\"time time-inline\">{strftimedelta(result[\"message_rtime\"])}</span>{test[\"test\"][\"test_name\"]}</div>' s += f'\\n<div class=\"test-procedure hidden\">\\n```testflows\\n{test[\"messages\"]}\\n```\\n</div>' if", "parser.add_argument(\"--title\", metavar=\"name\", help=\"custom title\", type=str) parser.add_argument(\"--only\", metavar=\"name\", type=str, default=[], nargs=\"+\", help=(\"name of one", "table def metadata(self, results): return { \"date\": 
time.time(), \"version\": __version__, } def requirements(self,", "testflows._core.cli.arg.type as argtype from testflows._core import __version__ from testflows._core.flags import Flags, SKIP from", "name: return attr[\"attribute_value\"] return default def table(self, results): table = { \"header\": [\"Requirement\",", "0: s += template(f\"{counts.unsatisfied / float(counts.units) * 100:.0f}\", \"Unsatisfied\", \"red\") if counts.untested >", "type=str) parser.add_argument(\"--confidential\", help=\"mark as confidential\", action=\"store_true\") parser.add_argument(\"--logo\", metavar=\"path\", type=argtype.file(\"rb\"), help='use logo image (.png)')", "format_summary(self, data): counts = data[\"counts\"] def template(value, title, color): return ( f'<div class=\"c100", "if flags & SKIP and settings.show_skipped is False: continue result = test[\"result\"] for", "to in writing, software # distributed under the License is distributed on an", "template(value, title, color): return ( f'<div class=\"c100 p{value} {color} smaller-title\">' f'<span>{value}%</span>' f'<span class=\"title\">{title}</span>'", "requirement in test[\"test\"][\"requirements\"]: if requirement[\"requirement_name\"] in requirements: requirements[requirement[\"requirement_name\"]][\"tests\"].append(self.add_test_messages(test, i, tests, results[\"tests_by_parent\"], results[\"tests_by_id\"])) return", "implied. 
# See the License for the specific language governing permissions and #", "d[\"logo\"] = args.logo.read() return d def data(self, source, results, args): d = dict()", "if spec_names: matched = False for name in spec_names: if name in spec[\"specification_name\"]:", "counts.units += 1 tests = req[\"tests\"] if not tests: counts.untested += 1 req[\"status\"]", "import partial import testflows.settings as settings import testflows._core.cli.arg.type as argtype from testflows._core import", "name self.units = units self.satisfied = satisfied self.unsatisfied = unsatisfied self.untested = untested", "make_title from testflows._core.transform.log.report.totals import Counts from testflows._core.objects import Requirement logo = '<img class=\"logo\"", "\"</p>\\n\" def format_confidential(self, data): if not data[\"company\"].get(\"confidential\"): return \"\" return f'\\n<p class=\"confidential\">Document status", "Coverage\\n\" for r in reqs.values(): s += f'\\n<section class=\"requirement\"><span class=\"requirement-inline\"><i class=\"utf-icon {self.icon_colors[r[\"status\"]]}\">{self.utf_icons[r[\"status\"]]}</i>{r[\"requirement\"].name}</span></section>' description", "'unsatisfied', 'untested'\", choices=[\"satisfied\", \"unsatisfied\", \"untested\"], default=[\"satisfied\", \"unsatisfied\", \"untested\"]) parser.add_argument(\"--input-link\", metavar=\"attribute\", help=\"attribute that is", "description: s += f'\\n<div markdown=\"1\" class=\"requirement-description hidden\">\\n{description}\\n</div>' for test in r[\"tests\"]: result =", "class=\"confidential\">Document status - Confidential</p>\\n' def format_copyright(self, data): if not data[\"company\"].get(\"name\"): return \"\" return", "FailResults = [\"Fail\", \"Error\", \"Null\"] XoutResults = [\"XOK\", \"XFail\", \"XError\", \"XNull\"] template =", "\"green\") if counts.unsatisfied > 0: s += template(f\"{counts.unsatisfied / float(counts.units) * 100:.0f}\", \"Unsatisfied\",", "\"-\": for spec in 
results[\"specifications\"]: if spec_names: matched = False for name in", "formatter, results, args): output = args.output output.write( formatter.format(self.data(args.requirements, results, args)) ) output.write(\"\\n\") def", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "TestFlows.com Open-Source Software Testing Framework (http://testflows.com) # # Licensed under the Apache License,", "format_confidential(self, data): if not data[\"company\"].get(\"confidential\"): return \"\" return f'\\n<p class=\"confidential\">Document status - Confidential</p>\\n'", "cls = result[\"result_type\"].lower() s += f'\\n<div class=\"test\"><span class=\"result result-inline result-{cls}\">{result[\"result_type\"]}</span><span class=\"time time-inline\">{strftimedelta(result[\"message_rtime\"])}</span>{test[\"test\"][\"test_name\"]}</div>' s", "return s + \"\\n\" def format_summary(self, data): counts = data[\"counts\"] def template(value, title,", "bufsize=1, encoding=\"utf-8\"), nargs=\"?\", help='output file, default: stdout', default=\"-\") parser.add_argument(\"--show\", metavar=\"status\", type=str, nargs=\"+\", help=\"verification", "+ \"</p>\\n\" def format_confidential(self, data): if not data[\"company\"].get(\"confidential\"): return \"\" return f'\\n<p class=\"confidential\">Document", "data[\"title\"]: return \"<br>\" + make_title(data[\"title\"]) return \"\" def format(self, data): body = \"\"", "test = tests[0][\"test\"] for attr in test[\"attributes\"]: if attr[\"attribute_name\"] == name: return attr[\"attribute_value\"]", "https://clickhouse.yandex <script> %(script)s </script> \"\"\" script = \"\"\" window.onload = function(){ // Toggle", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", ") + \"||\\n\" s += \"||\" + \"||\".join([f\"<center>{i}</center>\" for i in [\"**Requirements**\", str(counts.units),", 
"parser.add_argument(\"--logo\", metavar=\"path\", type=argtype.file(\"rb\"), help='use logo image (.png)') parser.add_argument(\"--title\", metavar=\"name\", help=\"custom title\", type=str) parser.add_argument(\"--only\",", "Flags(t[\"test\"][\"test_flags\"]) if flags & SKIP and settings.show_skipped is False: continue if t[\"test\"][\"message_time\"] >", "testflows_em = testflows.replace(\"[\", \"\").replace(\"]\", \"\") FailResults = [\"Fail\", \"Error\", \"Null\"] XoutResults = [\"XOK\",", "nargs=\"+\", help=(\"name of one or more specifications for which to generate coverage report\"", "\"unsatisfied\" return counts def company(self, args): d = {} if args.copyright: d[\"name\"] =", "p{value} {color} smaller-title\">' f'<span>{value}%</span>' f'<span class=\"title\">{title}</span>' '<div class=\"slice\">' '<div class=\"bar\"></div>' '<div class=\"fill\"></div>' '</div>'", "100:.0f}\", \"Unsatisfied\", \"red\") if counts.untested > 0: s += template(f\"{counts.untested / float(counts.units) *", "continue if t[\"test\"][\"message_time\"] > ended: break if t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True))", "epilog from testflows._core.cli.arg.common import HelpFormatter from testflows._core.cli.arg.handlers.handler import Handler as HandlerBase from testflows._core.cli.arg.handlers.report.copyright", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. 
#", "args.copyright if args.confidential: d[\"confidential\"] = True if args.logo: d[\"logo\"] = args.logo.read() return d", "def format_statistics(self, data): counts = data[\"counts\"] result_map = { \"OK\": \"Satisfied\", \"Fail\": \"Unsatisfied\",", "\"\"\" window.onload = function(){ // Toggle requirement description on click document.querySelectorAll('.requirement').forEach( function(item){ item.addEventListener('click',", "limitations under the License. import os import sys import json import time import", "\"\" body += self.format_metadata(data) body += self.format_summary(data) body += self.format_statistics(data) body += self.format_table(data)", "self.add_tests(requirements, results) d[\"metadata\"] = self.metadata(results) d[\"counts\"] = self.counts(d[\"requirements\"]) d[\"company\"] = self.company(args) counts =", "handle(self, args): results = {} formatter = Formatter() ResultsLogPipeline(args.input, results).run() self.generate(formatter, results, args)", "Toggle requirement description on click document.querySelectorAll('.requirement').forEach( function(item){ item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show'); item.children[0].classList.toggle('active'); }); });", "s += template(f\"{counts.unsatisfied / float(counts.units) * 100:.0f}\", \"Unsatisfied\", \"red\") if counts.untested > 0:", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "function(item){ item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show'); item.classList.toggle('active'); }); }); } \"\"\" class Formatter: utf_icons =", "= \"\\n## Summary\\n\" if counts.units <= 0: s += \"No tests\" else: s", "add_test_messages(self, test, idx, tests, tests_by_parent, tests_by_id): started = test[\"test\"][\"message_time\"] ended = test[\"result\"][\"message_time\"] messages", "https://testflows.com [ClickHouse]: https://clickhouse.yandex <script> %(script)s </script> \"\"\" script = 
\"\"\" window.onload = function(){", "return s + \"\\n\" def format_table(self, data): reqs = data[\"requirements\"] s = \"\\n\\n##", "template(f\"{counts.untested / float(counts.units) * 100:.0f}\", \"Untested\", \"orange\") s += '</div>\\n' return s def", "unsatisfied, untested): self.name = name self.units = units self.satisfied = satisfied self.unsatisfied =", "function(){ item.nextElementSibling.classList.toggle('show'); item.children[0].classList.toggle('active'); }); }); // Toggle test procedure on click document.querySelectorAll('.test').forEach( function(item){", "'<div class=\"chart\">' if counts.satisfied > 0: s += template(f\"{counts.satisfied / float(counts.units) * 100:.0f}\",", "%Y %-H:%M}||\\n\" f'||**Framework**||' f'{testflows} {metadata[\"version\"]}||\\n' ) return s + \"\\n\" def format_summary(self, data):", "for i, test in enumerate(tests): flags = Flags(test[\"test\"][\"test_flags\"]) if flags & SKIP and", "nargs=\"?\", help=\"input log, default: stdin\", default=\"-\") parser.add_argument(\"output\", metavar=\"output\", type=argtype.file(\"w\", bufsize=1, encoding=\"utf-8\"), nargs=\"?\", help='output", "metavar=\"name\", help=\"add copyright notice\", type=str) parser.add_argument(\"--confidential\", help=\"mark as confidential\", action=\"store_true\") parser.add_argument(\"--logo\", metavar=\"path\", type=argtype.file(\"rb\"),", "SKIP and settings.show_skipped is False: continue if t[\"test\"][\"message_time\"] > ended: break if getattr(TestType,", "specific language governing permissions and # limitations under the License. 
import os import", "enumerate(tests): flags = Flags(test[\"test\"][\"test_flags\"]) if flags & SKIP and settings.show_skipped is False: continue", "class=\"slice\">' '<div class=\"bar\"></div>' '<div class=\"fill\"></div>' '</div>' '</div>\\n') s = \"\\n## Summary\\n\" if counts.units", "= self.company(args) counts = d[\"counts\"] return d def generate(self, formatter, results, args): output", "= testflows.replace(\"[\", \"\").replace(\"]\", \"\") FailResults = [\"Fail\", \"Error\", \"Null\"] XoutResults = [\"XOK\", \"XFail\",", "under the License. import os import sys import json import time import base64", "spec_names: if name in spec[\"specification_name\"]: matched = True break if not matched: continue", "format_test, format_result from testflows._core.utils.timefuncs import localfromtimestamp, strftimedelta from testflows._core.utils.string import title as make_title", "attr in test[\"attributes\"]: if attr[\"attribute_name\"] == name: return attr[\"attribute_value\"] return default def table(self,", "+= \"No tests\" else: s += '<div class=\"chart\">' if counts.satisfied > 0: s", "{metadata[\"version\"]}||\\n' ) return s + \"\\n\" def format_summary(self, data): counts = data[\"counts\"] def", "return \"\" return (f'\\n<p class=\"copyright\">\\n' f'{copyright(data[\"company\"][\"name\"])}\\n' \"</p>\\n\") def format_metadata(self, data): metadata = data[\"metadata\"]", "generate a title # that include all specification names title = args.title if", "result = test[\"result\"] for requirement in test[\"test\"][\"requirements\"]: if requirement[\"requirement_name\"] in requirements: requirements[requirement[\"requirement_name\"]][\"tests\"].append(self.add_test_messages(test, i,", "formatter_class=HelpFormatter) parser.add_argument(\"requirements\", metavar=\"requirements\", type=partial(argtype.path, special=[\"-\"]), help=\"requirements source file, default: '-' (from input log)\",", "metavar=\"type\", type=str, help=\"output format, default: md (Markdown)\", 
choices=[\"md\"], default=\"md\") parser.add_argument(\"--copyright\", metavar=\"name\", help=\"add copyright", "Framework (http://testflows.com) # # Licensed under the Apache License, Version 2.0 (the \"License\");", "1 req[\"status\"] = \"satisfied\" else: counts.unsatisfied += 1 req[\"status\"] = \"unsatisfied\" return counts", "d def data(self, source, results, args): d = dict() specs, requirements = self.requirements(args.only,", "help=(\"name of one or more specifications for which to generate coverage report\" \",", "= { \"header\": [\"Requirement\", \"Tests\"], \"rows\": [], } return table def metadata(self, results):", "args.confidential: d[\"confidential\"] = True if args.logo: d[\"logo\"] = args.logo.read() return d def data(self,", "testflows._core.flags import Flags, SKIP from testflows._core.testtype import TestType from testflows._core.cli.arg.common import epilog from", "data): reqs = data[\"requirements\"] s = \"\\n\\n## Coverage\\n\" for r in reqs.values(): s", "results, args)) ) output.write(\"\\n\") def handle(self, args): results = {} formatter = Formatter()", "default: include all specifications. 
Only a unique part of the name can be", "0: s += template(f\"{counts.untested / float(counts.units) * 100:.0f}\", \"Untested\", \"orange\") s += '</div>\\n'", ") return s + \"\\n\" def format_summary(self, data): counts = data[\"counts\"] def template(value,", "encoding=\"utf-8\"), nargs=\"?\", help='output file, default: stdout', default=\"-\") parser.add_argument(\"--show\", metavar=\"status\", type=str, nargs=\"+\", help=\"verification status.", "document.querySelectorAll('.test').forEach( function(item){ item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show'); item.classList.toggle('active'); }); }); } \"\"\" class Formatter: utf_icons", "Requirement logo = '<img class=\"logo\" src=\"data:image/png;base64,%(data)s\" alt=\"logo\"/>' testflows = '<span class=\"testflows-logo\"></span> [<span class=\"logo-test\">Test</span><span", "+ \"||\".join( [\"<span></span>\", \"Units\"] + [f'<span class=\"result result-{k.lower()}\">{v}</span>' for k, v in result_map.items()]", "getattr(TestType, t[\"test\"][\"test_type\"]) >= TestType.Test \\ and t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"],", "requirements, results): tests = list(results[\"tests\"].values()) for i, test in enumerate(tests): flags = Flags(test[\"test\"][\"test_flags\"])", "counts.satisfied += 1 req[\"status\"] = \"satisfied\" else: counts.unsatisfied += 1 req[\"status\"] = \"unsatisfied\"", "in spec_names: if name in spec[\"specification_name\"]: matched = True break if not matched:", "unsatisfied self.untested = untested def __bool__(self): return self.units > 0 class Handler(HandlerBase): @classmethod", "__version__ from testflows._core.flags import Flags, SKIP from testflows._core.testtype import TestType from testflows._core.cli.arg.common import", "class=\"testflows-logo\"></span> [<span 
class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]' testflows_em = testflows.replace(\"[\", \"\").replace(\"]\", \"\") FailResults = [\"Fail\", \"Error\",", "'<div class=\"slice\">' '<div class=\"bar\"></div>' '<div class=\"fill\"></div>' '</div>' '</div>\\n') s = \"\\n## Summary\\n\" if", "as confidential\", action=\"store_true\") parser.add_argument(\"--logo\", metavar=\"path\", type=argtype.file(\"rb\"), help='use logo image (.png)') parser.add_argument(\"--title\", metavar=\"name\", help=\"custom", "class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]' testflows_em = testflows.replace(\"[\", \"\").replace(\"]\", \"\") FailResults = [\"Fail\", \"Error\", \"Null\"] XoutResults", "\"\\n\" def format_table(self, data): reqs = data[\"requirements\"] s = \"\\n\\n## Coverage\\n\" for r", "importlib.util.module_from_spec(spec) spec.loader.exec_module(module) for name, value in vars(module).items(): if not isinstance(value, Requirement): continue _requirements[value.name]", "parser.add_argument(\"input\", metavar=\"input\", type=argtype.logfile(\"r\", bufsize=1, encoding=\"utf-8\"), nargs=\"?\", help=\"input log, default: stdin\", default=\"-\") parser.add_argument(\"output\", metavar=\"output\",", "Counts(object): def __init__(self, name, units, satisfied, unsatisfied, untested): self.name = name self.units =", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "not specified generate a title # that include all specification names title =", "[f'<span class=\"result result-{k.lower()}\">{v}</span>' for k, v in result_map.items()] ) + \"||\\n\" s +=", "\"version\": __version__, } def requirements(self, spec_names, path, results): _requirements = {} _specs =", "\"Satisfied\", \"Fail\": \"Unsatisfied\", \"Error\": \"Untested\" } s = \"\\n\\n## Statistics\\n\" s += \"||\"", "from testflows._core.utils.timefuncs import localfromtimestamp, strftimedelta from testflows._core.utils.string import title as make_title 
from testflows._core.transform.log.report.totals", "tests[0][\"test\"] for attr in test[\"attributes\"]: if attr[\"attribute_name\"] == name: return attr[\"attribute_value\"] return default", "def handle(self, args): results = {} formatter = Formatter() ResultsLogPipeline(args.input, results).run() self.generate(formatter, results,", "class=\"no-tests\">\\n<span class=\"result-inline\">\\u270E</span>\\nNo tests\\n</div>' s += \"\\n\" return s + \"\\n\" def format_title(self, data):", "\"\\n\\n## Statistics\\n\" s += \"||\" + \"||\".join( [\"<span></span>\", \"Units\"] + [f'<span class=\"result result-{k.lower()}\">{v}</span>'", "Coverage Report%(title)s %(body)s --- Generated by {testflows} Open-Source Test Framework [<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]:", "class=\"chart\">' if counts.satisfied > 0: s += template(f\"{counts.satisfied / float(counts.units) * 100:.0f}\", \"Satisfied\",", "else: counts.unsatisfied += 1 req[\"status\"] = \"unsatisfied\" return counts def company(self, args): d", "data(self, source, results, args): d = dict() specs, requirements = self.requirements(args.only, source, results)", "False for name in spec_names: if name in spec[\"specification_name\"]: matched = True break", "See the License for the specific language governing permissions and # limitations under", "help='use logo image (.png)') parser.add_argument(\"--title\", metavar=\"name\", help=\"custom title\", type=str) parser.add_argument(\"--only\", metavar=\"name\", type=str, default=[],", "and settings.show_skipped is False: continue result = test[\"result\"] for requirement in test[\"test\"][\"requirements\"]: if", "_specs.append(spec) for req in spec[\"specification_requirements\"]: _requirements[req[\"name\"]] = {\"requirement\": Requirement(**req), \"tests\": []} else: spec", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "import datetime from functools import partial import 
testflows.settings as settings import testflows._core.cli.arg.type as", "default: stdout', default=\"-\") parser.add_argument(\"--show\", metavar=\"status\", type=str, nargs=\"+\", help=\"verification status. Choices: 'satisfied', 'unsatisfied', 'untested'\",", "title = \"<br>\".join([spec[\"specification_name\"] for spec in specs]) d[\"title\"] = title d[\"requirements\"] = self.add_tests(requirements,", "body = \"\" body += self.format_metadata(data) body += self.format_summary(data) body += self.format_statistics(data) body", "float(counts.units) * 100:.0f}\", \"Untested\", \"orange\") s += '</div>\\n' return s def format_statistics(self, data):", "__version__, } def requirements(self, spec_names, path, results): _requirements = {} _specs = []", "source, results, args): d = dict() specs, requirements = self.requirements(args.only, source, results) #", "hidden\">\\n{description}\\n</div>' for test in r[\"tests\"]: result = test[\"result\"] cls = result[\"result_type\"].lower() s +=", "Handler as HandlerBase from testflows._core.cli.arg.handlers.report.copyright import copyright from testflows._core.transform.log.pipeline import ResultsLogPipeline from testflows._core.transform.log.short", "args.logo: d[\"logo\"] = args.logo.read() return d def data(self, source, results, args): d =", "not isinstance(value, Requirement): continue _requirements[value.name] = {\"requirement\": value, \"tests\": []} return (_specs, _requirements)", "= self.counts(d[\"requirements\"]) d[\"company\"] = self.company(args) counts = d[\"counts\"] return d def generate(self, formatter,", "for requirement in test[\"test\"][\"requirements\"]: if requirement[\"requirement_name\"] in requirements: requirements[requirement[\"requirement_name\"]][\"tests\"].append(self.add_test_messages(test, i, tests, results[\"tests_by_parent\"], results[\"tests_by_id\"]))", "* 100:.0f}\", \"Unsatisfied\", \"red\") if counts.untested > 0: s += template(f\"{counts.untested / float(counts.units)", 
"class=\"clearfix\">%(logo)s%(confidential)s%(copyright)s</section> --- # Requirements Coverage Report%(title)s %(body)s --- Generated by {testflows} Open-Source Test", "= args.logo.read() return d def data(self, source, results, args): d = dict() specs,", "data): counts = data[\"counts\"] result_map = { \"OK\": \"Satisfied\", \"Fail\": \"Unsatisfied\", \"Error\": \"Untested\"", "for req in requirements.values(): counts.units += 1 tests = req[\"tests\"] if not tests:", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "testflows._core.transform.log.pipeline import ResultsLogPipeline from testflows._core.transform.log.short import format_test, format_result from testflows._core.utils.timefuncs import localfromtimestamp, strftimedelta", "1 req[\"status\"] = \"unsatisfied\" return counts def company(self, args): d = {} if", "--- # Requirements Coverage Report%(title)s %(body)s --- Generated by {testflows} Open-Source Test Framework", "input log, default: job.url\", type=str, default=\"job.url\") parser.add_argument(\"--format\", metavar=\"type\", type=str, help=\"output format, default: md", "} \"\"\" class Formatter: utf_icons = { \"satisfied\": \"\\u2714\", \"unsatisfied\": \"\\u2718\", \"untested\": \"\\u270E\"", "procedure on click document.querySelectorAll('.test').forEach( function(item){ item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show'); item.classList.toggle('active'); }); }); } \"\"\"", "not tests: counts.untested += 1 req[\"status\"] = \"untested\" else: satisfied = True for", "\"Null\"] XoutResults = [\"XOK\", \"XFail\", \"XError\", \"XNull\"] template = f\"\"\" <section class=\"clearfix\">%(logo)s%(confidential)s%(copyright)s</section> ---", "f'<div class=\"c100 p{value} {color} smaller-title\">' f'<span>{value}%</span>' f'<span class=\"title\">{title}</span>' '<div class=\"slice\">' '<div class=\"bar\"></div>' '<div", "False if satisfied: counts.satisfied += 1 
req[\"status\"] = \"satisfied\" else: counts.unsatisfied += 1", "Test Framework [<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]: https://testflows.com [ClickHouse]: https://clickhouse.yandex <script> %(script)s </script> \"\"\" script", "testflows._core.testtype import TestType from testflows._core.cli.arg.common import epilog from testflows._core.cli.arg.common import HelpFormatter from testflows._core.cli.arg.handlers.handler", "def template(value, title, color): return ( f'<div class=\"c100 p{value} {color} smaller-title\">' f'<span>{value}%</span>' f'<span", "continue if t[\"test\"][\"message_time\"] > ended: break if getattr(TestType, t[\"test\"][\"test_type\"]) >= TestType.Test \\ and", "from testflows._core.transform.log.report.totals import Counts from testflows._core.objects import Requirement logo = '<img class=\"logo\" src=\"data:image/png;base64,%(data)s\"", "body, \"script\": script, \"title\": self.format_title(data) } class Counts(object): def __init__(self, name, units, satisfied,", "data): if not data[\"company\"].get(\"name\"): return \"\" return (f'\\n<p class=\"copyright\">\\n' f'{copyright(data[\"company\"][\"name\"])}\\n' \"</p>\\n\") def format_metadata(self,", "self.name = name self.units = units self.satisfied = satisfied self.unsatisfied = unsatisfied self.untested", "\"unsatisfied\", \"untested\"]) parser.add_argument(\"--input-link\", metavar=\"attribute\", help=\"attribute that is used as a link to the", "f'\\n<p class=\"confidential\">Document status - Confidential</p>\\n' def format_copyright(self, data): if not data[\"company\"].get(\"name\"): return \"\"", "specification names title = args.title if title is None and specs: title =", "True break if not matched: continue _specs.append(spec) for req in spec[\"specification_requirements\"]: _requirements[req[\"name\"]] =", "if requirement[\"requirement_name\"] in requirements: 
requirements[requirement[\"requirement_name\"]][\"tests\"].append(self.add_test_messages(test, i, tests, results[\"tests_by_parent\"], results[\"tests_by_id\"])) return requirements def counts(self,", "from testflows._core.transform.log.pipeline import ResultsLogPipeline from testflows._core.transform.log.short import format_test, format_result from testflows._core.utils.timefuncs import localfromtimestamp,", "path == \"-\": for spec in results[\"specifications\"]: if spec_names: matched = False for", "\"||\\n\" return s + \"\\n\" def format_table(self, data): reqs = data[\"requirements\"] s =", "if data[\"title\"]: return \"<br>\" + make_title(data[\"title\"]) return \"\" def format(self, data): body =", "d[\"name\"] = args.copyright if args.confidential: d[\"confidential\"] = True if args.logo: d[\"logo\"] = args.logo.read()", "'</div>' '</div>\\n') s = \"\\n## Summary\\n\" if counts.units <= 0: s += \"No", "KIND, either express or implied. # See the License for the specific language", "tests\" else: s += '<div class=\"chart\">' if counts.satisfied > 0: s += template(f\"{counts.satisfied", "req[\"status\"] = \"untested\" else: satisfied = True for test in tests: result =", "if not tests: return default test = tests[0][\"test\"] for attr in test[\"attributes\"]: if", "results): _requirements = {} _specs = [] if path == \"-\": for spec", "\\ and t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True)) else: for t", "{} _specs = [] if path == \"-\": for spec in results[\"specifications\"]: if", "d = dict() specs, requirements = self.requirements(args.only, source, results) # if custom title", "\"\", tests_by_parent, tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True)) messages.append(format_result(test[\"result\"], no_colors=True)) test[\"messages\"] = 
\"\".join(messages) return test", "action=\"store_true\") parser.add_argument(\"--logo\", metavar=\"path\", type=argtype.file(\"rb\"), help='use logo image (.png)') parser.add_argument(\"--title\", metavar=\"name\", help=\"custom title\", type=str)", "attr[\"attribute_name\"] == name: return attr[\"attribute_value\"] return default def table(self, results): table = {", "= data[\"counts\"] def template(value, title, color): return ( f'<div class=\"c100 p{value} {color} smaller-title\">'", "= result[\"result_type\"].lower() s += f'\\n<div class=\"test\"><span class=\"result result-inline result-{cls}\">{result[\"result_type\"]}</span><span class=\"time time-inline\">{strftimedelta(result[\"message_rtime\"])}</span>{test[\"test\"][\"test_name\"]}</div>' s +=", "if name in spec[\"specification_name\"]: matched = True break if not matched: continue _specs.append(spec)", "= \"\".join(messages) return test def add_tests(self, requirements, results): tests = list(results[\"tests\"].values()) for i,", "= test[\"result\"] if result[\"result_type\"] != \"OK\": satisfied = False if satisfied: counts.satisfied +=", "Only a unique part of the name can be specified.\" )) parser.set_defaults(func=cls()) def", "value, \"tests\": []} return (_specs, _requirements) def add_test_messages(self, test, idx, tests, tests_by_parent, tests_by_id):", "= self.add_tests(requirements, results) d[\"metadata\"] = self.metadata(results) d[\"counts\"] = self.counts(d[\"requirements\"]) d[\"company\"] = self.company(args) counts", "click document.querySelectorAll('.test').forEach( function(item){ item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show'); item.classList.toggle('active'); }); }); } \"\"\" class Formatter:", "hidden\">\\n```testflows\\n{test[\"messages\"]}\\n```\\n</div>' if not r[\"tests\"]: s += f'\\n<div class=\"no-tests\">\\n<span class=\"result-inline\">\\u270E</span>\\nNo tests\\n</div>' s += \"\\n\"", "%-H:%M}||\\n\" f'||**Framework**||' 
f'{testflows} {metadata[\"version\"]}||\\n' ) return s + \"\\n\" def format_summary(self, data): counts", "def add_test_messages(self, test, idx, tests, tests_by_parent, tests_by_id): started = test[\"test\"][\"message_time\"] ended = test[\"result\"][\"message_time\"]", "ANY KIND, either express or implied. # See the License for the specific", "stdout', default=\"-\") parser.add_argument(\"--show\", metavar=\"status\", type=str, nargs=\"+\", help=\"verification status. Choices: 'satisfied', 'unsatisfied', 'untested'\", choices=[\"satisfied\",", "def get_attribute(self, result, name, default=None): tests = list(result[\"tests\"].values()) if not tests: return default", "messages.append(format_test(t[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True)) messages.append(format_result(test[\"result\"], no_colors=True)) test[\"messages\"] = \"\".join(messages) return", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "is False: continue if t[\"test\"][\"message_time\"] > ended: break if getattr(TestType, t[\"test\"][\"test_type\"]) >= TestType.Test", "req[\"status\"] = \"satisfied\" else: counts.unsatisfied += 1 req[\"status\"] = \"unsatisfied\" return counts def", "satisfied: counts.satisfied += 1 req[\"status\"] = \"satisfied\" else: counts.unsatisfied += 1 req[\"status\"] =", "self.units > 0 class Handler(HandlerBase): @classmethod def add_command(cls, commands): parser = commands.add_parser(\"coverage\", help=\"requirements", "= importlib.util.spec_from_file_location(\"requirements\", path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) for name, value in vars(module).items(): if", "= {\"requirement\": Requirement(**req), \"tests\": []} else: spec = importlib.util.spec_from_file_location(\"requirements\", path) module = importlib.util.module_from_spec(spec)", "Open-Source Software Testing Framework (http://testflows.com) # # Licensed under the Apache License, Version", "functools import partial import testflows.settings as settings import testflows._core.cli.arg.type as argtype from testflows._core", "s += \"||\" + \"||\".join( [\"<span></span>\", \"Units\"] + [f'<span class=\"result result-{k.lower()}\">{v}</span>' for k,", "return counts def company(self, args): d = {} if args.copyright: d[\"name\"] = args.copyright", "in [\"**Requirements**\", str(counts.units), str(counts.satisfied), str(counts.unsatisfied), str(counts.untested)]]) + \"||\\n\" return s + \"\\n\" def", "type=str, default=[], nargs=\"+\", help=(\"name of one or more specifications for which to generate", "get_attribute(self, result, name, default=None): tests = list(result[\"tests\"].values()) if not tests: return default test", "class=\"copyright\">\\n' f'{copyright(data[\"company\"][\"name\"])}\\n' \"</p>\\n\") def format_metadata(self, data): metadata = data[\"metadata\"] s = ( \"\\n\\n\"", "args.output output.write( formatter.format(self.data(args.requirements, 
results, args)) ) output.write(\"\\n\") def handle(self, args): results = {}", "+= self.format_summary(data) body += self.format_statistics(data) body += self.format_table(data) return template.strip() % { \"logo\":", "formatter.format(self.data(args.requirements, results, args)) ) output.write(\"\\n\") def handle(self, args): results = {} formatter =", "= [\"XOK\", \"XFail\", \"XError\", \"XNull\"] template = f\"\"\" <section class=\"clearfix\">%(logo)s%(confidential)s%(copyright)s</section> --- # Requirements", "[] if path == \"-\": for spec in results[\"specifications\"]: if spec_names: matched =", "data = base64.b64encode(data[\"company\"][\"logo\"]).decode(\"utf-8\") return '\\n<p>' + logo % {\"data\": data} + \"</p>\\n\" def", "matched = False for name in spec_names: if name in spec[\"specification_name\"]: matched =", "\"\" return (f'\\n<p class=\"copyright\">\\n' f'{copyright(data[\"company\"][\"name\"])}\\n' \"</p>\\n\") def format_metadata(self, data): metadata = data[\"metadata\"] s", "requirements(self, spec_names, path, results): _requirements = {} _specs = [] if path ==", "in reqs.values(): s += f'\\n<section class=\"requirement\"><span class=\"requirement-inline\"><i class=\"utf-icon {self.icon_colors[r[\"status\"]]}\">{self.utf_icons[r[\"status\"]]}</i>{r[\"requirement\"].name}</span></section>' description = r[\"requirement\"].description.replace(\"\\\\n\",\"\\n\") if", "\"XFail\", \"XError\", \"XNull\"] template = f\"\"\" <section class=\"clearfix\">%(logo)s%(confidential)s%(copyright)s</section> --- # Requirements Coverage Report%(title)s", "d[\"confidential\"] = True if args.logo: d[\"logo\"] = args.logo.read() return d def data(self, source,", "s def format_statistics(self, data): counts = data[\"counts\"] result_map = { \"OK\": \"Satisfied\", \"Fail\":", "test[\"attributes\"]: if attr[\"attribute_name\"] == name: return attr[\"attribute_value\"] return default def table(self, results): table", "v in result_map.items()] ) + \"||\\n\" s += 
\"||\" + \"||\".join([f\"<center>{i}</center>\" for i", "test[\"test\"][\"message_time\"] ended = test[\"result\"][\"message_time\"] messages = [format_test(test[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)] if getattr(TestType,", "for name in spec_names: if name in spec[\"specification_name\"]: matched = True break if", "else: for t in tests[idx + 1:]: flags = Flags(t[\"test\"][\"test_flags\"]) if flags &", "\"satisfied\": \"\\u2714\", \"unsatisfied\": \"\\u2718\", \"untested\": \"\\u270E\" } icon_colors = { \"satisfied\": \"color-ok\", \"unsatisfied\":", "name, default=None): tests = list(result[\"tests\"].values()) if not tests: return default test = tests[0][\"test\"]", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "smaller-title\">' f'<span>{value}%</span>' f'<span class=\"title\">{title}</span>' '<div class=\"slice\">' '<div class=\"bar\"></div>' '<div class=\"fill\"></div>' '</div>' '</div>\\n') s", "result_map.items()] ) + \"||\\n\" s += \"||\" + \"||\".join([f\"<center>{i}</center>\" for i in [\"**Requirements**\",", "'<div class=\"bar\"></div>' '<div class=\"fill\"></div>' '</div>' '</div>\\n') s = \"\\n## Summary\\n\" if counts.units <=", "name, value in vars(module).items(): if not isinstance(value, Requirement): continue _requirements[value.name] = {\"requirement\": value,", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "result = test[\"result\"] cls = result[\"result_type\"].lower() s += f'\\n<div class=\"test\"><span class=\"result result-inline result-{cls}\">{result[\"result_type\"]}</span><span", "SKIP and settings.show_skipped is False: continue result = test[\"result\"] for requirement in test[\"test\"][\"requirements\"]:", "help=\"requirements source file, default: '-' (from input log)\", nargs=\"?\", default=\"-\") parser.add_argument(\"input\", metavar=\"input\", type=argtype.logfile(\"r\",", "applicable law or agreed to in writing, software # distributed 
under the License", "and specs: title = \"<br>\".join([spec[\"specification_name\"] for spec in specs]) d[\"title\"] = title d[\"requirements\"]", "+= template(f\"{counts.unsatisfied / float(counts.units) * 100:.0f}\", \"Unsatisfied\", \"red\") if counts.untested > 0: s", "default=[], nargs=\"+\", help=(\"name of one or more specifications for which to generate coverage", "= { \"satisfied\": \"color-ok\", \"unsatisfied\": \"color-fail\", \"untested\": \"color-error\" } def format_logo(self, data): if", "# Copyright 2019 Katteli Inc. # TestFlows.com Open-Source Software Testing Framework (http://testflows.com) #", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "in r[\"tests\"]: result = test[\"result\"] cls = result[\"result_type\"].lower() s += f'\\n<div class=\"test\"><span class=\"result", "_requirements[value.name] = {\"requirement\": value, \"tests\": []} return (_specs, _requirements) def add_test_messages(self, test, idx,", "\"\\u2714\", \"unsatisfied\": \"\\u2718\", \"untested\": \"\\u270E\" } icon_colors = { \"satisfied\": \"color-ok\", \"unsatisfied\": \"color-fail\",", "input log)\", nargs=\"?\", default=\"-\") parser.add_argument(\"input\", metavar=\"input\", type=argtype.logfile(\"r\", bufsize=1, encoding=\"utf-8\"), nargs=\"?\", help=\"input log, default:", "= self.requirements(args.only, source, results) # if custom title was not specified generate a", "as settings import testflows._core.cli.arg.type as argtype from testflows._core import __version__ from testflows._core.flags import", "was not specified generate a title # that include all specification names title", "writing, software # distributed under the License is distributed on an \"AS IS\"", "type=str, help=\"output format, default: md (Markdown)\", choices=[\"md\"], default=\"md\") parser.add_argument(\"--copyright\", metavar=\"name\", help=\"add copyright notice\",", "if counts.untested > 0: s += template(f\"{counts.untested / float(counts.units) * 100:.0f}\", 
\"Untested\", \"orange\")", "def data(self, source, results, args): d = dict() specs, requirements = self.requirements(args.only, source,", "class=\"requirement-description hidden\">\\n{description}\\n</div>' for test in r[\"tests\"]: result = test[\"result\"] cls = result[\"result_type\"].lower() s", "Summary\\n\" if counts.units <= 0: s += \"No tests\" else: s += '<div", "counts = d[\"counts\"] return d def generate(self, formatter, results, args): output = args.output", "the name can be specified.\" )) parser.set_defaults(func=cls()) def get_attribute(self, result, name, default=None): tests", "= r[\"requirement\"].description.replace(\"\\\\n\",\"\\n\") if description: s += f'\\n<div markdown=\"1\" class=\"requirement-description hidden\">\\n{description}\\n</div>' for test in", "compliance with the License. # You may obtain a copy of the License", "self.company(args) counts = d[\"counts\"] return d def generate(self, formatter, results, args): output =", "* 4)) for req in requirements.values(): counts.units += 1 tests = req[\"tests\"] if", "'<span class=\"testflows-logo\"></span> [<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]' testflows_em = testflows.replace(\"[\", \"\").replace(\"]\", \"\") FailResults = [\"Fail\",", "# limitations under the License. import os import sys import json import time", "args.logo.read() return d def data(self, source, results, args): d = dict() specs, requirements", "class=\"fill\"></div>' '</div>' '</div>\\n') s = \"\\n## Summary\\n\" if counts.units <= 0: s +=", "for the specific language governing permissions and # limitations under the License. 
import", "+ logo % {\"data\": data} + \"</p>\\n\" def format_confidential(self, data): if not data[\"company\"].get(\"confidential\"):", "s += '<div class=\"chart\">' if counts.satisfied > 0: s += template(f\"{counts.satisfied / float(counts.units)", "\"logo\": self.format_logo(data), \"confidential\": self.format_confidential(data), \"copyright\": self.format_copyright(data), \"body\": body, \"script\": script, \"title\": self.format_title(data) }", "nargs=\"?\", default=\"-\") parser.add_argument(\"input\", metavar=\"input\", type=argtype.logfile(\"r\", bufsize=1, encoding=\"utf-8\"), nargs=\"?\", help=\"input log, default: stdin\", default=\"-\")", "item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show'); item.classList.toggle('active'); }); }); } \"\"\" class Formatter: utf_icons = {", "and settings.show_skipped is False: continue if t[\"test\"][\"message_time\"] > ended: break if getattr(TestType, t[\"test\"][\"test_type\"])", "+= self.format_table(data) return template.strip() % { \"logo\": self.format_logo(data), \"confidential\": self.format_confidential(data), \"copyright\": self.format_copyright(data), \"body\":", "all specification names title = args.title if title is None and specs: title", "ended: break if getattr(TestType, t[\"test\"][\"test_type\"]) >= TestType.Test \\ and t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\", tests_by_parent,", "r in reqs.values(): s += f'\\n<section class=\"requirement\"><span class=\"requirement-inline\"><i class=\"utf-icon {self.icon_colors[r[\"status\"]]}\">{self.utf_icons[r[\"status\"]]}</i>{r[\"requirement\"].name}</span></section>' description = r[\"requirement\"].description.replace(\"\\\\n\",\"\\n\")", "\"red\") if counts.untested > 0: s += template(f\"{counts.untested / float(counts.units) * 100:.0f}\", \"Untested\",", "if attr[\"attribute_name\"] == name: return attr[\"attribute_value\"] return default def 
table(self, results): table =", "template.strip() % { \"logo\": self.format_logo(data), \"confidential\": self.format_confidential(data), \"copyright\": self.format_copyright(data), \"body\": body, \"script\": script,", "from testflows._core.cli.arg.handlers.handler import Handler as HandlerBase from testflows._core.cli.arg.handlers.report.copyright import copyright from testflows._core.transform.log.pipeline import", "Counts(\"requirements\", *([0] * 4)) for req in requirements.values(): counts.units += 1 tests =", "in requirements: requirements[requirement[\"requirement_name\"]][\"tests\"].append(self.add_test_messages(test, i, tests, results[\"tests_by_parent\"], results[\"tests_by_id\"])) return requirements def counts(self, requirements): counts", "in results[\"specifications\"]: if spec_names: matched = False for name in spec_names: if name", "format_title(self, data): if data[\"title\"]: return \"<br>\" + make_title(data[\"title\"]) return \"\" def format(self, data):", "script, \"title\": self.format_title(data) } class Counts(object): def __init__(self, name, units, satisfied, unsatisfied, untested):", "0 class Handler(HandlerBase): @classmethod def add_command(cls, commands): parser = commands.add_parser(\"coverage\", help=\"requirements coverage report\",", "[\"Fail\", \"Error\", \"Null\"] XoutResults = [\"XOK\", \"XFail\", \"XError\", \"XNull\"] template = f\"\"\" <section", "= {\"requirement\": value, \"tests\": []} return (_specs, _requirements) def add_test_messages(self, test, idx, tests,", "(the \"License\"); # you may not use this file except in compliance with", "= ( \"\\n\\n\" f\"||**Date**||{localfromtimestamp(metadata['date']):%b %d, %Y %-H:%M}||\\n\" f'||**Framework**||' f'{testflows} {metadata[\"version\"]}||\\n' ) return s", "Copyright 2019 Katteli Inc. 
# TestFlows.com Open-Source Software Testing Framework (http://testflows.com) # #", "= untested def __bool__(self): return self.units > 0 class Handler(HandlerBase): @classmethod def add_command(cls,", "counts = data[\"counts\"] result_map = { \"OK\": \"Satisfied\", \"Fail\": \"Unsatisfied\", \"Error\": \"Untested\" }", "metavar=\"input\", type=argtype.logfile(\"r\", bufsize=1, encoding=\"utf-8\"), nargs=\"?\", help=\"input log, default: stdin\", default=\"-\") parser.add_argument(\"output\", metavar=\"output\", type=argtype.file(\"w\",", "# Unless required by applicable law or agreed to in writing, software #", "data): if data[\"title\"]: return \"<br>\" + make_title(data[\"title\"]) return \"\" def format(self, data): body", "commands.add_parser(\"coverage\", help=\"requirements coverage report\", epilog=epilog(), description=\"Generate requirements coverage report.\", formatter_class=HelpFormatter) parser.add_argument(\"requirements\", metavar=\"requirements\", type=partial(argtype.path,", "started = test[\"test\"][\"message_time\"] ended = test[\"result\"][\"message_time\"] messages = [format_test(test[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)]", "by applicable law or agreed to in writing, software # distributed under the", "import Handler as HandlerBase from testflows._core.cli.arg.handlers.report.copyright import copyright from testflows._core.transform.log.pipeline import ResultsLogPipeline from", "> 0: s += template(f\"{counts.unsatisfied / float(counts.units) * 100:.0f}\", \"Unsatisfied\", \"red\") if counts.untested", "= False for name in spec_names: if name in spec[\"specification_name\"]: matched = True", "is None and specs: title = \"<br>\".join([spec[\"specification_name\"] for spec in specs]) d[\"title\"] =", "Formatter: utf_icons = { \"satisfied\": \"\\u2714\", \"unsatisfied\": \"\\u2718\", \"untested\": \"\\u270E\" } icon_colors =", "( f'<div class=\"c100 p{value} {color} smaller-title\">' f'<span>{value}%</span>' f'<span 
class=\"title\">{title}</span>' '<div class=\"slice\">' '<div class=\"bar\"></div>'", "counts(self, requirements): counts = Counts(\"requirements\", *([0] * 4)) for req in requirements.values(): counts.units", "file except in compliance with the License. # You may obtain a copy", "for attr in test[\"attributes\"]: if attr[\"attribute_name\"] == name: return attr[\"attribute_value\"] return default def", "= \"\\n\\n## Coverage\\n\" for r in reqs.values(): s += f'\\n<section class=\"requirement\"><span class=\"requirement-inline\"><i class=\"utf-icon", "import time import base64 import threading import importlib.util from datetime import datetime from", "d[\"metadata\"] = self.metadata(results) d[\"counts\"] = self.counts(d[\"requirements\"]) d[\"company\"] = self.company(args) counts = d[\"counts\"] return", "= list(result[\"tests\"].values()) if not tests: return default test = tests[0][\"test\"] for attr in", "> ended: break if t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True)) messages.append(format_result(test[\"result\"],", "format_statistics(self, data): counts = data[\"counts\"] result_map = { \"OK\": \"Satisfied\", \"Fail\": \"Unsatisfied\", \"Error\":", "self.format_metadata(data) body += self.format_summary(data) body += self.format_statistics(data) body += self.format_table(data) return template.strip() %", "--- Generated by {testflows} Open-Source Test Framework [<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]: https://testflows.com [ClickHouse]: https://clickhouse.yandex", "HelpFormatter from testflows._core.cli.arg.handlers.handler import Handler as HandlerBase from testflows._core.cli.arg.handlers.report.copyright import copyright from testflows._core.transform.log.pipeline", "type=argtype.file(\"rb\"), help='use logo image (.png)') 
parser.add_argument(\"--title\", metavar=\"name\", help=\"custom title\", type=str) parser.add_argument(\"--only\", metavar=\"name\", type=str,", "template(f\"{counts.unsatisfied / float(counts.units) * 100:.0f}\", \"Unsatisfied\", \"red\") if counts.untested > 0: s +=", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "spec in results[\"specifications\"]: if spec_names: matched = False for name in spec_names: if", "more specifications for which to generate coverage report\" \", default: include all specifications.", "in tests[idx + 1:]: flags = Flags(t[\"test\"][\"test_flags\"]) if flags & SKIP and settings.show_skipped", "_specs = [] if path == \"-\": for spec in results[\"specifications\"]: if spec_names:", "to generate coverage report\" \", default: include all specifications. Only a unique part", "if counts.satisfied > 0: s += template(f\"{counts.satisfied / float(counts.units) * 100:.0f}\", \"Satisfied\", \"green\")", "as HandlerBase from testflows._core.cli.arg.handlers.report.copyright import copyright from testflows._core.transform.log.pipeline import ResultsLogPipeline from testflows._core.transform.log.short import", "unique part of the name can be specified.\" )) parser.set_defaults(func=cls()) def get_attribute(self, result,", "args): output = args.output output.write( formatter.format(self.data(args.requirements, results, args)) ) output.write(\"\\n\") def handle(self, args):", "\"untested\": \"\\u270E\" } icon_colors = { \"satisfied\": \"color-ok\", \"unsatisfied\": \"color-fail\", \"untested\": \"color-error\" }", "r[\"tests\"]: s += f'\\n<div class=\"no-tests\">\\n<span class=\"result-inline\">\\u270E</span>\\nNo tests\\n</div>' s += \"\\n\" return s +", ">= TestType.Test \\ and t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\", tests_by_parent, tests_by_id, 
no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True)) else:", "class=\"utf-icon {self.icon_colors[r[\"status\"]]}\">{self.utf_icons[r[\"status\"]]}</i>{r[\"requirement\"].name}</span></section>' description = r[\"requirement\"].description.replace(\"\\\\n\",\"\\n\") if description: s += f'\\n<div markdown=\"1\" class=\"requirement-description hidden\">\\n{description}\\n</div>'", "default=\"-\") parser.add_argument(\"input\", metavar=\"input\", type=argtype.logfile(\"r\", bufsize=1, encoding=\"utf-8\"), nargs=\"?\", help=\"input log, default: stdin\", default=\"-\") parser.add_argument(\"output\",", "s += \"No tests\" else: s += '<div class=\"chart\">' if counts.satisfied > 0:", "class=\"requirement\"><span class=\"requirement-inline\"><i class=\"utf-icon {self.icon_colors[r[\"status\"]]}\">{self.utf_icons[r[\"status\"]]}</i>{r[\"requirement\"].name}</span></section>' description = r[\"requirement\"].description.replace(\"\\\\n\",\"\\n\") if description: s += f'\\n<div markdown=\"1\"", "strftimedelta from testflows._core.utils.string import title as make_title from testflows._core.transform.log.report.totals import Counts from testflows._core.objects", "return requirements def counts(self, requirements): counts = Counts(\"requirements\", *([0] * 4)) for req", "s = \"\\n## Summary\\n\" if counts.units <= 0: s += \"No tests\" else:", "help=\"requirements coverage report\", epilog=epilog(), description=\"Generate requirements coverage report.\", formatter_class=HelpFormatter) parser.add_argument(\"requirements\", metavar=\"requirements\", type=partial(argtype.path, special=[\"-\"]),", "\"\\u2718\", \"untested\": \"\\u270E\" } icon_colors = { \"satisfied\": \"color-ok\", \"unsatisfied\": \"color-fail\", \"untested\": \"color-error\"", "if args.copyright: d[\"name\"] = args.copyright if args.confidential: d[\"confidential\"] = True if args.logo: d[\"logo\"]", "return '\\n<p>' + logo % {\"data\": data} + \"</p>\\n\" def format_confidential(self, 
data): if", "<= 0: s += \"No tests\" else: s += '<div class=\"chart\">' if counts.satisfied", "testflows.settings as settings import testflows._core.cli.arg.type as argtype from testflows._core import __version__ from testflows._core.flags", "testflows._core.cli.arg.common import epilog from testflows._core.cli.arg.common import HelpFormatter from testflows._core.cli.arg.handlers.handler import Handler as HandlerBase", "for name, value in vars(module).items(): if not isinstance(value, Requirement): continue _requirements[value.name] = {\"requirement\":", "+ \"\\n\" def format_table(self, data): reqs = data[\"requirements\"] s = \"\\n\\n## Coverage\\n\" for", "\"||\".join([f\"<center>{i}</center>\" for i in [\"**Requirements**\", str(counts.units), str(counts.satisfied), str(counts.unsatisfied), str(counts.untested)]]) + \"||\\n\" return s", "[format_test(test[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)] if getattr(TestType, test[\"test\"][\"test_type\"]) > TestType.Test: for t in", "= \"\"\" window.onload = function(){ // Toggle requirement description on click document.querySelectorAll('.requirement').forEach( function(item){", "metavar=\"attribute\", help=\"attribute that is used as a link to the input log, default:", "if getattr(TestType, test[\"test\"][\"test_type\"]) > TestType.Test: for t in tests[idx + 1:]: flags =", "parser.add_argument(\"--copyright\", metavar=\"name\", help=\"add copyright notice\", type=str) parser.add_argument(\"--confidential\", help=\"mark as confidential\", action=\"store_true\") parser.add_argument(\"--logo\", metavar=\"path\",", "f'||**Framework**||' f'{testflows} {metadata[\"version\"]}||\\n' ) return s + \"\\n\" def format_summary(self, data): counts =", "item.nextElementSibling.classList.toggle('show'); item.children[0].classList.toggle('active'); }); }); // Toggle test procedure on click document.querySelectorAll('.test').forEach( function(item){ item.addEventListener('click',", "import Flags, SKIP from 
testflows._core.testtype import TestType from testflows._core.cli.arg.common import epilog from testflows._core.cli.arg.common", "+ \"||\".join([f\"<center>{i}</center>\" for i in [\"**Requirements**\", str(counts.units), str(counts.satisfied), str(counts.unsatisfied), str(counts.untested)]]) + \"||\\n\" return", "flags & SKIP and settings.show_skipped is False: continue if t[\"test\"][\"message_time\"] > ended: break", "+= \"||\" + \"||\".join([f\"<center>{i}</center>\" for i in [\"**Requirements**\", str(counts.units), str(counts.satisfied), str(counts.unsatisfied), str(counts.untested)]]) +", "is False: continue if t[\"test\"][\"message_time\"] > ended: break if t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\", tests_by_parent,", "class=\"result result-inline result-{cls}\">{result[\"result_type\"]}</span><span class=\"time time-inline\">{strftimedelta(result[\"message_rtime\"])}</span>{test[\"test\"][\"test_name\"]}</div>' s += f'\\n<div class=\"test-procedure hidden\">\\n```testflows\\n{test[\"messages\"]}\\n```\\n</div>' if not r[\"tests\"]:", "from testflows._core.flags import Flags, SKIP from testflows._core.testtype import TestType from testflows._core.cli.arg.common import epilog", "tests_by_id): started = test[\"test\"][\"message_time\"] ended = test[\"result\"][\"message_time\"] messages = [format_test(test[\"test\"], \"\", tests_by_parent, tests_by_id,", "2019 Katteli Inc. 
# TestFlows.com Open-Source Software Testing Framework (http://testflows.com) # # Licensed", "if t[\"test\"][\"message_time\"] > ended: break if t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"],", "messages.append(format_result(test[\"result\"], no_colors=True)) test[\"messages\"] = \"\".join(messages) return test def add_tests(self, requirements, results): tests =", "in specs]) d[\"title\"] = title d[\"requirements\"] = self.add_tests(requirements, results) d[\"metadata\"] = self.metadata(results) d[\"counts\"]", "'</div>\\n') s = \"\\n## Summary\\n\" if counts.units <= 0: s += \"No tests\"", "class=\"logo-flows\">Flows</span>]' testflows_em = testflows.replace(\"[\", \"\").replace(\"]\", \"\") FailResults = [\"Fail\", \"Error\", \"Null\"] XoutResults =", "in test[\"attributes\"]: if attr[\"attribute_name\"] == name: return attr[\"attribute_value\"] return default def table(self, results):", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "icon_colors = { \"satisfied\": \"color-ok\", \"unsatisfied\": \"color-fail\", \"untested\": \"color-error\" } def format_logo(self, data):", "Requirement): continue _requirements[value.name] = {\"requirement\": value, \"tests\": []} return (_specs, _requirements) def add_test_messages(self,", "generate(self, formatter, results, args): output = args.output output.write( formatter.format(self.data(args.requirements, results, args)) ) output.write(\"\\n\")", "data[\"company\"].get(\"name\"): return \"\" return (f'\\n<p class=\"copyright\">\\n' f'{copyright(data[\"company\"][\"name\"])}\\n' \"</p>\\n\") def format_metadata(self, data): metadata =", "bufsize=1, encoding=\"utf-8\"), nargs=\"?\", help=\"input log, default: stdin\", default=\"-\") parser.add_argument(\"output\", metavar=\"output\", type=argtype.file(\"w\", bufsize=1, 
encoding=\"utf-8\"),", "self.metadata(results) d[\"counts\"] = self.counts(d[\"requirements\"]) d[\"company\"] = self.company(args) counts = d[\"counts\"] return d def", "test, idx, tests, tests_by_parent, tests_by_id): started = test[\"test\"][\"message_time\"] ended = test[\"result\"][\"message_time\"] messages =", "parser.add_argument(\"--format\", metavar=\"type\", type=str, help=\"output format, default: md (Markdown)\", choices=[\"md\"], default=\"md\") parser.add_argument(\"--copyright\", metavar=\"name\", help=\"add", "title was not specified generate a title # that include all specification names", "results): tests = list(results[\"tests\"].values()) for i, test in enumerate(tests): flags = Flags(test[\"test\"][\"test_flags\"]) if", "# TestFlows.com Open-Source Software Testing Framework (http://testflows.com) # # Licensed under the Apache", "confidential\", action=\"store_true\") parser.add_argument(\"--logo\", metavar=\"path\", type=argtype.file(\"rb\"), help='use logo image (.png)') parser.add_argument(\"--title\", metavar=\"name\", help=\"custom title\",", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "= req[\"tests\"] if not tests: counts.untested += 1 req[\"status\"] = \"untested\" else: satisfied", "format_result from testflows._core.utils.timefuncs import localfromtimestamp, strftimedelta from testflows._core.utils.string import title as make_title from", "testflows._core.cli.arg.handlers.report.copyright import copyright from testflows._core.transform.log.pipeline import ResultsLogPipeline from testflows._core.transform.log.short import format_test, format_result from", "results, args): d = dict() specs, requirements = self.requirements(args.only, source, results) # if", "= test[\"result\"] for requirement in test[\"test\"][\"requirements\"]: if requirement[\"requirement_name\"] in requirements: requirements[requirement[\"requirement_name\"]][\"tests\"].append(self.add_test_messages(test, i, tests,", 
"self.unsatisfied = unsatisfied self.untested = untested def __bool__(self): return self.units > 0 class", "& SKIP and settings.show_skipped is False: continue if t[\"test\"][\"message_time\"] > ended: break if", "data[\"requirements\"] s = \"\\n\\n## Coverage\\n\" for r in reqs.values(): s += f'\\n<section class=\"requirement\"><span", "settings import testflows._core.cli.arg.type as argtype from testflows._core import __version__ from testflows._core.flags import Flags,", "+= f'\\n<section class=\"requirement\"><span class=\"requirement-inline\"><i class=\"utf-icon {self.icon_colors[r[\"status\"]]}\">{self.utf_icons[r[\"status\"]]}</i>{r[\"requirement\"].name}</span></section>' description = r[\"requirement\"].description.replace(\"\\\\n\",\"\\n\") if description: s +=", "s += \"\\n\" return s + \"\\n\" def format_title(self, data): if data[\"title\"]: return", "all specifications. Only a unique part of the name can be specified.\" ))", "[]} return (_specs, _requirements) def add_test_messages(self, test, idx, tests, tests_by_parent, tests_by_id): started =", "counts.untested += 1 req[\"status\"] = \"untested\" else: satisfied = True for test in", "def format_logo(self, data): if not data[\"company\"].get(\"logo\"): return \"\" data = base64.b64encode(data[\"company\"][\"logo\"]).decode(\"utf-8\") return '\\n<p>'", "result-inline result-{cls}\">{result[\"result_type\"]}</span><span class=\"time time-inline\">{strftimedelta(result[\"message_rtime\"])}</span>{test[\"test\"][\"test_name\"]}</div>' s += f'\\n<div class=\"test-procedure hidden\">\\n```testflows\\n{test[\"messages\"]}\\n```\\n</div>' if not r[\"tests\"]: s", "\"\") FailResults = [\"Fail\", \"Error\", \"Null\"] XoutResults = [\"XOK\", \"XFail\", \"XError\", \"XNull\"] template", "specs, requirements = self.requirements(args.only, source, results) # if custom title was not specified", "= True if args.logo: d[\"logo\"] = args.logo.read() return d def data(self, source, results,", "return (f'\\n<p 
class=\"copyright\">\\n' f'{copyright(data[\"company\"][\"name\"])}\\n' \"</p>\\n\") def format_metadata(self, data): metadata = data[\"metadata\"] s =", "base64.b64encode(data[\"company\"][\"logo\"]).decode(\"utf-8\") return '\\n<p>' + logo % {\"data\": data} + \"</p>\\n\" def format_confidential(self, data):", "the License for the specific language governing permissions and # limitations under the", "test[\"result\"][\"message_time\"] messages = [format_test(test[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)] if getattr(TestType, test[\"test\"][\"test_type\"]) > TestType.Test:", "d = {} if args.copyright: d[\"name\"] = args.copyright if args.confidential: d[\"confidential\"] = True", "\"Unsatisfied\", \"red\") if counts.untested > 0: s += template(f\"{counts.untested / float(counts.units) * 100:.0f}\",", "image (.png)') parser.add_argument(\"--title\", metavar=\"name\", help=\"custom title\", type=str) parser.add_argument(\"--only\", metavar=\"name\", type=str, default=[], nargs=\"+\", help=(\"name", "True for test in tests: result = test[\"result\"] if result[\"result_type\"] != \"OK\": satisfied", "from testflows._core import __version__ from testflows._core.flags import Flags, SKIP from testflows._core.testtype import TestType", "default: job.url\", type=str, default=\"job.url\") parser.add_argument(\"--format\", metavar=\"type\", type=str, help=\"output format, default: md (Markdown)\", choices=[\"md\"],", "format_copyright(self, data): if not data[\"company\"].get(\"name\"): return \"\" return (f'\\n<p class=\"copyright\">\\n' f'{copyright(data[\"company\"][\"name\"])}\\n' \"</p>\\n\") def", "coverage report\", epilog=epilog(), description=\"Generate requirements coverage report.\", formatter_class=HelpFormatter) parser.add_argument(\"requirements\", metavar=\"requirements\", type=partial(argtype.path, special=[\"-\"]), help=\"requirements", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#", "flags & SKIP and settings.show_skipped is False: continue result = test[\"result\"] for requirement", "0: s += \"No tests\" else: s += '<div class=\"chart\">' if counts.satisfied >", "no_colors=True)) else: for t in tests[idx + 1:]: flags = Flags(t[\"test\"][\"test_flags\"]) if flags", "License. import os import sys import json import time import base64 import threading", "\"Untested\" } s = \"\\n\\n## Statistics\\n\" s += \"||\" + \"||\".join( [\"<span></span>\", \"Units\"]", "\"\", tests_by_parent, tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True)) else: for t in tests[idx + 1:]:", "= Flags(t[\"test\"][\"test_flags\"]) if flags & SKIP and settings.show_skipped is False: continue if t[\"test\"][\"message_time\"]", "log)\", nargs=\"?\", default=\"-\") parser.add_argument(\"input\", metavar=\"input\", type=argtype.logfile(\"r\", bufsize=1, encoding=\"utf-8\"), nargs=\"?\", help=\"input log, default: stdin\",", "choices=[\"satisfied\", \"unsatisfied\", \"untested\"], default=[\"satisfied\", \"unsatisfied\", \"untested\"]) parser.add_argument(\"--input-link\", metavar=\"attribute\", help=\"attribute that is used as", "parser.set_defaults(func=cls()) def get_attribute(self, result, name, default=None): tests = list(result[\"tests\"].values()) if not tests: return", "(http://testflows.com) # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "def format(self, data): body = \"\" body += self.format_metadata(data) body += self.format_summary(data) body", "1 req[\"status\"] = \"untested\" else: satisfied = True for test in tests: result", "testflows._core.cli.arg.common import HelpFormatter from testflows._core.cli.arg.handlers.handler import Handler as HandlerBase from testflows._core.cli.arg.handlers.report.copyright import copyright", "Testing Framework (http://testflows.com) # # Licensed under the Apache License, Version 2.0 (the", "class=\"requirement-inline\"><i class=\"utf-icon 
{self.icon_colors[r[\"status\"]]}\">{self.utf_icons[r[\"status\"]]}</i>{r[\"requirement\"].name}</span></section>' description = r[\"requirement\"].description.replace(\"\\\\n\",\"\\n\") if description: s += f'\\n<div markdown=\"1\" class=\"requirement-description", "matched: continue _specs.append(spec) for req in spec[\"specification_requirements\"]: _requirements[req[\"name\"]] = {\"requirement\": Requirement(**req), \"tests\": []}", "= \"<br>\".join([spec[\"specification_name\"] for spec in specs]) d[\"title\"] = title d[\"requirements\"] = self.add_tests(requirements, results)", "format_metadata(self, data): metadata = data[\"metadata\"] s = ( \"\\n\\n\" f\"||**Date**||{localfromtimestamp(metadata['date']):%b %d, %Y %-H:%M}||\\n\"", "Version 2.0 (the \"License\"); # you may not use this file except in", "from testflows._core.cli.arg.common import HelpFormatter from testflows._core.cli.arg.handlers.handler import Handler as HandlerBase from testflows._core.cli.arg.handlers.report.copyright import", "str(counts.untested)]]) + \"||\\n\" return s + \"\\n\" def format_table(self, data): reqs = data[\"requirements\"]", "self.format_copyright(data), \"body\": body, \"script\": script, \"title\": self.format_title(data) } class Counts(object): def __init__(self, name,", "not data[\"company\"].get(\"logo\"): return \"\" data = base64.b64encode(data[\"company\"][\"logo\"]).decode(\"utf-8\") return '\\n<p>' + logo % {\"data\":", "self.requirements(args.only, source, results) # if custom title was not specified generate a title", "script = \"\"\" window.onload = function(){ // Toggle requirement description on click document.querySelectorAll('.requirement').forEach(", "format_table(self, data): reqs = data[\"requirements\"] s = \"\\n\\n## Coverage\\n\" for r in reqs.values():", "test[\"result\"] cls = result[\"result_type\"].lower() s += f'\\n<div class=\"test\"><span class=\"result result-inline result-{cls}\">{result[\"result_type\"]}</span><span class=\"time 
time-inline\">{strftimedelta(result[\"message_rtime\"])}</span>{test[\"test\"][\"test_name\"]}</div>'", "s = ( \"\\n\\n\" f\"||**Date**||{localfromtimestamp(metadata['date']):%b %d, %Y %-H:%M}||\\n\" f'||**Framework**||' f'{testflows} {metadata[\"version\"]}||\\n' ) return", "on click document.querySelectorAll('.requirement').forEach( function(item){ item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show'); item.children[0].classList.toggle('active'); }); }); // Toggle test", "test in tests: result = test[\"result\"] if result[\"result_type\"] != \"OK\": satisfied = False", ") output.write(\"\\n\") def handle(self, args): results = {} formatter = Formatter() ResultsLogPipeline(args.input, results).run()", "satisfied = False if satisfied: counts.satisfied += 1 req[\"status\"] = \"satisfied\" else: counts.unsatisfied", "untested def __bool__(self): return self.units > 0 class Handler(HandlerBase): @classmethod def add_command(cls, commands):", "self.format_statistics(data) body += self.format_table(data) return template.strip() % { \"logo\": self.format_logo(data), \"confidential\": self.format_confidential(data), \"copyright\":", "from testflows._core.utils.string import title as make_title from testflows._core.transform.log.report.totals import Counts from testflows._core.objects import", "+= 1 req[\"status\"] = \"unsatisfied\" return counts def company(self, args): d = {}", "\"unsatisfied\": \"color-fail\", \"untested\": \"color-error\" } def format_logo(self, data): if not data[\"company\"].get(\"logo\"): return \"\"", "settings.show_skipped is False: continue result = test[\"result\"] for requirement in test[\"test\"][\"requirements\"]: if requirement[\"requirement_name\"]", "'satisfied', 'unsatisfied', 'untested'\", choices=[\"satisfied\", \"unsatisfied\", \"untested\"], default=[\"satisfied\", \"unsatisfied\", \"untested\"]) parser.add_argument(\"--input-link\", metavar=\"attribute\", help=\"attribute that", 
"isinstance(value, Requirement): continue _requirements[value.name] = {\"requirement\": value, \"tests\": []} return (_specs, _requirements) def", "logo % {\"data\": data} + \"</p>\\n\" def format_confidential(self, data): if not data[\"company\"].get(\"confidential\"): return", "tests_by_parent, tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True)) messages.append(format_result(test[\"result\"], no_colors=True)) test[\"messages\"] = \"\".join(messages) return test def", "} return table def metadata(self, results): return { \"date\": time.time(), \"version\": __version__, }", "metavar=\"requirements\", type=partial(argtype.path, special=[\"-\"]), help=\"requirements source file, default: '-' (from input log)\", nargs=\"?\", default=\"-\")", "data): if not data[\"company\"].get(\"logo\"): return \"\" data = base64.b64encode(data[\"company\"][\"logo\"]).decode(\"utf-8\") return '\\n<p>' + logo", "satisfied, unsatisfied, untested): self.name = name self.units = units self.satisfied = satisfied self.unsatisfied", "return ( f'<div class=\"c100 p{value} {color} smaller-title\">' f'<span>{value}%</span>' f'<span class=\"title\">{title}</span>' '<div class=\"slice\">' '<div", "\"header\": [\"Requirement\", \"Tests\"], \"rows\": [], } return table def metadata(self, results): return {", "report\" \", default: include all specifications. 
Only a unique part of the name", "commands): parser = commands.add_parser(\"coverage\", help=\"requirements coverage report\", epilog=epilog(), description=\"Generate requirements coverage report.\", formatter_class=HelpFormatter)", "counts.units <= 0: s += \"No tests\" else: s += '<div class=\"chart\">' if", "+= \"||\" + \"||\".join( [\"<span></span>\", \"Units\"] + [f'<span class=\"result result-{k.lower()}\">{v}</span>' for k, v", "body += self.format_table(data) return template.strip() % { \"logo\": self.format_logo(data), \"confidential\": self.format_confidential(data), \"copyright\": self.format_copyright(data),", "= True break if not matched: continue _specs.append(spec) for req in spec[\"specification_requirements\"]: _requirements[req[\"name\"]]", "Requirement(**req), \"tests\": []} else: spec = importlib.util.spec_from_file_location(\"requirements\", path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) for", "parser.add_argument(\"--input-link\", metavar=\"attribute\", help=\"attribute that is used as a link to the input log,", "t[\"test\"][\"message_time\"] > ended: break if getattr(TestType, t[\"test\"][\"test_type\"]) >= TestType.Test \\ and t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"],", "result-{k.lower()}\">{v}</span>' for k, v in result_map.items()] ) + \"||\\n\" s += \"||\" +", "i in [\"**Requirements**\", str(counts.units), str(counts.satisfied), str(counts.unsatisfied), str(counts.untested)]]) + \"||\\n\" return s + \"\\n\"", "for req in spec[\"specification_requirements\"]: _requirements[req[\"name\"]] = {\"requirement\": Requirement(**req), \"tests\": []} else: spec =", "s = \"\\n\\n## Statistics\\n\" s += \"||\" + \"||\".join( [\"<span></span>\", \"Units\"] + [f'<span", "\"||\".join( [\"<span></span>\", \"Units\"] + [f'<span class=\"result result-{k.lower()}\">{v}</span>' for k, v in result_map.items()] )", "</script> \"\"\" script = \"\"\" 
window.onload = function(){ // Toggle requirement description on", "s + \"\\n\" def format_summary(self, data): counts = data[\"counts\"] def template(value, title, color):", "\"untested\" else: satisfied = True for test in tests: result = test[\"result\"] if", "+ \"||\\n\" s += \"||\" + \"||\".join([f\"<center>{i}</center>\" for i in [\"**Requirements**\", str(counts.units), str(counts.satisfied),", "messages = [format_test(test[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)] if getattr(TestType, test[\"test\"][\"test_type\"]) > TestType.Test: for", "\"date\": time.time(), \"version\": __version__, } def requirements(self, spec_names, path, results): _requirements = {}", "a link to the input log, default: job.url\", type=str, default=\"job.url\") parser.add_argument(\"--format\", metavar=\"type\", type=str,", "default def table(self, results): table = { \"header\": [\"Requirement\", \"Tests\"], \"rows\": [], }", "tests_by_parent, tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True)) else: for t in tests[idx + 1:]: flags", "output.write( formatter.format(self.data(args.requirements, results, args)) ) output.write(\"\\n\") def handle(self, args): results = {} formatter", "== \"-\": for spec in results[\"specifications\"]: if spec_names: matched = False for name", "if not data[\"company\"].get(\"logo\"): return \"\" data = base64.b64encode(data[\"company\"][\"logo\"]).decode(\"utf-8\") return '\\n<p>' + logo %", "if not data[\"company\"].get(\"confidential\"): return \"\" return f'\\n<p class=\"confidential\">Document status - Confidential</p>\\n' def format_copyright(self,", "\"\\n\\n\" f\"||**Date**||{localfromtimestamp(metadata['date']):%b %d, %Y %-H:%M}||\\n\" f'||**Framework**||' f'{testflows} {metadata[\"version\"]}||\\n' ) return s + \"\\n\"", "OF ANY KIND, either express or implied. 
# See the License for the", "{testflows} Open-Source Test Framework [<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]: https://testflows.com [ClickHouse]: https://clickhouse.yandex <script> %(script)s </script>", "color): return ( f'<div class=\"c100 p{value} {color} smaller-title\">' f'<span>{value}%</span>' f'<span class=\"title\">{title}</span>' '<div class=\"slice\">'", "\"</p>\\n\") def format_metadata(self, data): metadata = data[\"metadata\"] s = ( \"\\n\\n\" f\"||**Date**||{localfromtimestamp(metadata['date']):%b %d,", "// Toggle test procedure on click document.querySelectorAll('.test').forEach( function(item){ item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show'); item.classList.toggle('active'); });", "argtype from testflows._core import __version__ from testflows._core.flags import Flags, SKIP from testflows._core.testtype import", "alt=\"logo\"/>' testflows = '<span class=\"testflows-logo\"></span> [<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]' testflows_em = testflows.replace(\"[\", \"\").replace(\"]\", \"\")", "/ float(counts.units) * 100:.0f}\", \"Unsatisfied\", \"red\") if counts.untested > 0: s += template(f\"{counts.untested", "# if custom title was not specified generate a title # that include", "\"\\n\\n## Coverage\\n\" for r in reqs.values(): s += f'\\n<section class=\"requirement\"><span class=\"requirement-inline\"><i class=\"utf-icon {self.icon_colors[r[\"status\"]]}\">{self.utf_icons[r[\"status\"]]}</i>{r[\"requirement\"].name}</span></section>'", "untested): self.name = name self.units = units self.satisfied = satisfied self.unsatisfied = unsatisfied", "permissions and # limitations under the License. 
import os import sys import json", "(Markdown)\", choices=[\"md\"], default=\"md\") parser.add_argument(\"--copyright\", metavar=\"name\", help=\"add copyright notice\", type=str) parser.add_argument(\"--confidential\", help=\"mark as confidential\",", "path, results): _requirements = {} _specs = [] if path == \"-\": for", "import importlib.util from datetime import datetime from functools import partial import testflows.settings as", "log, default: job.url\", type=str, default=\"job.url\") parser.add_argument(\"--format\", metavar=\"type\", type=str, help=\"output format, default: md (Markdown)\",", "from functools import partial import testflows.settings as settings import testflows._core.cli.arg.type as argtype from", "importlib.util.spec_from_file_location(\"requirements\", path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) for name, value in vars(module).items(): if not", "type=argtype.logfile(\"r\", bufsize=1, encoding=\"utf-8\"), nargs=\"?\", help=\"input log, default: stdin\", default=\"-\") parser.add_argument(\"output\", metavar=\"output\", type=argtype.file(\"w\", bufsize=1,", "Statistics\\n\" s += \"||\" + \"||\".join( [\"<span></span>\", \"Units\"] + [f'<span class=\"result result-{k.lower()}\">{v}</span>' for", "Katteli Inc. 
# TestFlows.com Open-Source Software Testing Framework (http://testflows.com) # # Licensed under", "True if args.logo: d[\"logo\"] = args.logo.read() return d def data(self, source, results, args):", "self.format_title(data) } class Counts(object): def __init__(self, name, units, satisfied, unsatisfied, untested): self.name =", "Requirements Coverage Report%(title)s %(body)s --- Generated by {testflows} Open-Source Test Framework [<span class=\"logo-test\">Test</span><span", "return s def format_statistics(self, data): counts = data[\"counts\"] result_map = { \"OK\": \"Satisfied\",", "requirement description on click document.querySelectorAll('.requirement').forEach( function(item){ item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show'); item.children[0].classList.toggle('active'); }); }); //", "t[\"test\"][\"message_time\"] > ended: break if t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True))", "= tests[0][\"test\"] for attr in test[\"attributes\"]: if attr[\"attribute_name\"] == name: return attr[\"attribute_value\"] return", "no_colors=True)) test[\"messages\"] = \"\".join(messages) return test def add_tests(self, requirements, results): tests = list(results[\"tests\"].values())", "Choices: 'satisfied', 'unsatisfied', 'untested'\", choices=[\"satisfied\", \"unsatisfied\", \"untested\"], default=[\"satisfied\", \"unsatisfied\", \"untested\"]) parser.add_argument(\"--input-link\", metavar=\"attribute\", help=\"attribute", "> 0 class Handler(HandlerBase): @classmethod def add_command(cls, commands): parser = commands.add_parser(\"coverage\", help=\"requirements coverage", "requirements: requirements[requirement[\"requirement_name\"]][\"tests\"].append(self.add_test_messages(test, i, tests, results[\"tests_by_parent\"], results[\"tests_by_id\"])) return 
requirements def counts(self, requirements): counts =", "or agreed to in writing, software # distributed under the License is distributed", "for test in r[\"tests\"]: result = test[\"result\"] cls = result[\"result_type\"].lower() s += f'\\n<div", "the input log, default: job.url\", type=str, default=\"job.url\") parser.add_argument(\"--format\", metavar=\"type\", type=str, help=\"output format, default:", "on click document.querySelectorAll('.test').forEach( function(item){ item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show'); item.classList.toggle('active'); }); }); } \"\"\" class", "format_logo(self, data): if not data[\"company\"].get(\"logo\"): return \"\" data = base64.b64encode(data[\"company\"][\"logo\"]).decode(\"utf-8\") return '\\n<p>' +", "[], } return table def metadata(self, results): return { \"date\": time.time(), \"version\": __version__,", "type=str, default=\"job.url\") parser.add_argument(\"--format\", metavar=\"type\", type=str, help=\"output format, default: md (Markdown)\", choices=[\"md\"], default=\"md\") parser.add_argument(\"--copyright\",", "no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True)) else: for t in tests[idx + 1:]: flags = Flags(t[\"test\"][\"test_flags\"])", "of one or more specifications for which to generate coverage report\" \", default:", "spec = importlib.util.spec_from_file_location(\"requirements\", path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) for name, value in vars(module).items():", "importlib.util from datetime import datetime from functools import partial import testflows.settings as settings", "type=argtype.file(\"w\", bufsize=1, encoding=\"utf-8\"), nargs=\"?\", help='output file, default: stdout', default=\"-\") parser.add_argument(\"--show\", metavar=\"status\", type=str, nargs=\"+\",", "requirement[\"requirement_name\"] in requirements: 
requirements[requirement[\"requirement_name\"]][\"tests\"].append(self.add_test_messages(test, i, tests, results[\"tests_by_parent\"], results[\"tests_by_id\"])) return requirements def counts(self, requirements):", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "+= '</div>\\n' return s def format_statistics(self, data): counts = data[\"counts\"] result_map = {", "for test in tests: result = test[\"result\"] if result[\"result_type\"] != \"OK\": satisfied =", "s += f'\\n<div class=\"no-tests\">\\n<span class=\"result-inline\">\\u270E</span>\\nNo tests\\n</div>' s += \"\\n\" return s + \"\\n\"", "satisfied = True for test in tests: result = test[\"result\"] if result[\"result_type\"] !=", "import os import sys import json import time import base64 import threading import", "License. # You may obtain a copy of the License at # #", "metavar=\"output\", type=argtype.file(\"w\", bufsize=1, encoding=\"utf-8\"), nargs=\"?\", help='output file, default: stdout', default=\"-\") parser.add_argument(\"--show\", metavar=\"status\", type=str,", "tests = list(result[\"tests\"].values()) if not tests: return default test = tests[0][\"test\"] for attr", "template = f\"\"\" <section class=\"clearfix\">%(logo)s%(confidential)s%(copyright)s</section> --- # Requirements Coverage Report%(title)s %(body)s --- Generated", "add_tests(self, requirements, results): tests = list(results[\"tests\"].values()) for i, test in enumerate(tests): flags =", "= '<span class=\"testflows-logo\"></span> [<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]' testflows_em = testflows.replace(\"[\", \"\").replace(\"]\", \"\") FailResults =", "= satisfied self.unsatisfied = unsatisfied self.untested = untested def __bool__(self): return self.units >", "= args.copyright if args.confidential: d[\"confidential\"] = True if args.logo: d[\"logo\"] = args.logo.read() return", "not matched: continue _specs.append(spec) for req in 
spec[\"specification_requirements\"]: _requirements[req[\"name\"]] = {\"requirement\": Requirement(**req), \"tests\":", "if result[\"result_type\"] != \"OK\": satisfied = False if satisfied: counts.satisfied += 1 req[\"status\"]", "tests_by_parent, tests_by_id, no_colors=True)] if getattr(TestType, test[\"test\"][\"test_type\"]) > TestType.Test: for t in tests[idx +", "window.onload = function(){ // Toggle requirement description on click document.querySelectorAll('.requirement').forEach( function(item){ item.addEventListener('click', function(){", "testflows._core.transform.log.short import format_test, format_result from testflows._core.utils.timefuncs import localfromtimestamp, strftimedelta from testflows._core.utils.string import title", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "if t[\"test\"][\"message_time\"] > ended: break if getattr(TestType, t[\"test\"][\"test_type\"]) >= TestType.Test \\ and t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]):", "metavar=\"path\", type=argtype.file(\"rb\"), help='use logo image (.png)') parser.add_argument(\"--title\", metavar=\"name\", help=\"custom title\", type=str) parser.add_argument(\"--only\", metavar=\"name\",", "100:.0f}\", \"Satisfied\", \"green\") if counts.unsatisfied > 0: s += template(f\"{counts.unsatisfied / float(counts.units) *", "\"\".join(messages) return test def add_tests(self, requirements, results): tests = list(results[\"tests\"].values()) for i, test", "= Counts(\"requirements\", *([0] * 4)) for req in requirements.values(): counts.units += 1 tests", "if counts.unsatisfied > 0: s += template(f\"{counts.unsatisfied / float(counts.units) * 100:.0f}\", \"Unsatisfied\", \"red\")", "- Confidential</p>\\n' def format_copyright(self, data): if not data[\"company\"].get(\"name\"): return \"\" return (f'\\n<p class=\"copyright\">\\n'", "License, Version 2.0 (the \"License\"); # you may not use this file except", "default=\"md\") 
parser.add_argument(\"--copyright\", metavar=\"name\", help=\"add copyright notice\", type=str) parser.add_argument(\"--confidential\", help=\"mark as confidential\", action=\"store_true\") parser.add_argument(\"--logo\",", "vars(module).items(): if not isinstance(value, Requirement): continue _requirements[value.name] = {\"requirement\": value, \"tests\": []} return", "tests: counts.untested += 1 req[\"status\"] = \"untested\" else: satisfied = True for test", "spec.loader.exec_module(module) for name, value in vars(module).items(): if not isinstance(value, Requirement): continue _requirements[value.name] =", "matched = True break if not matched: continue _specs.append(spec) for req in spec[\"specification_requirements\"]:", "in spec[\"specification_name\"]: matched = True break if not matched: continue _specs.append(spec) for req", "> 0: s += template(f\"{counts.satisfied / float(counts.units) * 100:.0f}\", \"Satisfied\", \"green\") if counts.unsatisfied", "specified.\" )) parser.set_defaults(func=cls()) def get_attribute(self, result, name, default=None): tests = list(result[\"tests\"].values()) if not", "= \"unsatisfied\" return counts def company(self, args): d = {} if args.copyright: d[\"name\"]", "return \"<br>\" + make_title(data[\"title\"]) return \"\" def format(self, data): body = \"\" body", "100:.0f}\", \"Untested\", \"orange\") s += '</div>\\n' return s def format_statistics(self, data): counts =", "if t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True)) messages.append(format_result(test[\"result\"], no_colors=True)) test[\"messages\"] =", "title\", type=str) parser.add_argument(\"--only\", metavar=\"name\", type=str, default=[], nargs=\"+\", help=(\"name of one or more specifications", "= {} _specs = [] if path == \"-\": for spec in results[\"specifications\"]:", "copy of the License 
at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "title, color): return ( f'<div class=\"c100 p{value} {color} smaller-title\">' f'<span>{value}%</span>' f'<span class=\"title\">{title}</span>' '<div", "\"\").replace(\"]\", \"\") FailResults = [\"Fail\", \"Error\", \"Null\"] XoutResults = [\"XOK\", \"XFail\", \"XError\", \"XNull\"]", "document.querySelectorAll('.requirement').forEach( function(item){ item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show'); item.children[0].classList.toggle('active'); }); }); // Toggle test procedure on", "md (Markdown)\", choices=[\"md\"], default=\"md\") parser.add_argument(\"--copyright\", metavar=\"name\", help=\"add copyright notice\", type=str) parser.add_argument(\"--confidential\", help=\"mark as", "+ make_title(data[\"title\"]) return \"\" def format(self, data): body = \"\" body += self.format_metadata(data)", "f\"||**Date**||{localfromtimestamp(metadata['date']):%b %d, %Y %-H:%M}||\\n\" f'||**Framework**||' f'{testflows} {metadata[\"version\"]}||\\n' ) return s + \"\\n\" def", "\"title\": self.format_title(data) } class Counts(object): def __init__(self, name, units, satisfied, unsatisfied, untested): self.name", "link to the input log, default: job.url\", type=str, default=\"job.url\") parser.add_argument(\"--format\", metavar=\"type\", type=str, help=\"output", "utf_icons = { \"satisfied\": \"\\u2714\", \"unsatisfied\": \"\\u2718\", \"untested\": \"\\u270E\" } icon_colors = {", "import epilog from testflows._core.cli.arg.common import HelpFormatter from testflows._core.cli.arg.handlers.handler import Handler as HandlerBase from", "Toggle test procedure on click document.querySelectorAll('.test').forEach( function(item){ item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show'); item.classList.toggle('active'); }); });", "\"untested\": \"color-error\" } def format_logo(self, data): if not data[\"company\"].get(\"logo\"): 
return \"\" data =", "\"confidential\": self.format_confidential(data), \"copyright\": self.format_copyright(data), \"body\": body, \"script\": script, \"title\": self.format_title(data) } class Counts(object):", "\"XError\", \"XNull\"] template = f\"\"\" <section class=\"clearfix\">%(logo)s%(confidential)s%(copyright)s</section> --- # Requirements Coverage Report%(title)s %(body)s", "flags = Flags(t[\"test\"][\"test_flags\"]) if flags & SKIP and settings.show_skipped is False: continue if", "requirements): counts = Counts(\"requirements\", *([0] * 4)) for req in requirements.values(): counts.units +=", "help=\"add copyright notice\", type=str) parser.add_argument(\"--confidential\", help=\"mark as confidential\", action=\"store_true\") parser.add_argument(\"--logo\", metavar=\"path\", type=argtype.file(\"rb\"), help='use", "= commands.add_parser(\"coverage\", help=\"requirements coverage report\", epilog=epilog(), description=\"Generate requirements coverage report.\", formatter_class=HelpFormatter) parser.add_argument(\"requirements\", metavar=\"requirements\",", "if counts.units <= 0: s += \"No tests\" else: s += '<div class=\"chart\">'", "of the name can be specified.\" )) parser.set_defaults(func=cls()) def get_attribute(self, result, name, default=None):", "or implied. # See the License for the specific language governing permissions and", "a title # that include all specification names title = args.title if title", "type=str, nargs=\"+\", help=\"verification status. 
Choices: 'satisfied', 'unsatisfied', 'untested'\", choices=[\"satisfied\", \"unsatisfied\", \"untested\"], default=[\"satisfied\", \"unsatisfied\",", "else: satisfied = True for test in tests: result = test[\"result\"] if result[\"result_type\"]", "testflows._core.cli.arg.handlers.handler import Handler as HandlerBase from testflows._core.cli.arg.handlers.report.copyright import copyright from testflows._core.transform.log.pipeline import ResultsLogPipeline", "HandlerBase from testflows._core.cli.arg.handlers.report.copyright import copyright from testflows._core.transform.log.pipeline import ResultsLogPipeline from testflows._core.transform.log.short import format_test,", "class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]: https://testflows.com [ClickHouse]: https://clickhouse.yandex <script> %(script)s </script> \"\"\" script = \"\"\" window.onload", "s + \"\\n\" def format_table(self, data): reqs = data[\"requirements\"] s = \"\\n\\n## Coverage\\n\"", "} def requirements(self, spec_names, path, results): _requirements = {} _specs = [] if", "and # limitations under the License. 
import os import sys import json import", "src=\"data:image/png;base64,%(data)s\" alt=\"logo\"/>' testflows = '<span class=\"testflows-logo\"></span> [<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]' testflows_em = testflows.replace(\"[\", \"\").replace(\"]\",", "'\\n<p>' + logo % {\"data\": data} + \"</p>\\n\" def format_confidential(self, data): if not", "args.title if title is None and specs: title = \"<br>\".join([spec[\"specification_name\"] for spec in", "\"Unsatisfied\", \"Error\": \"Untested\" } s = \"\\n\\n## Statistics\\n\" s += \"||\" + \"||\".join(", "class=\"title\">{title}</span>' '<div class=\"slice\">' '<div class=\"bar\"></div>' '<div class=\"fill\"></div>' '</div>' '</div>\\n') s = \"\\n## Summary\\n\"", "make_title(data[\"title\"]) return \"\" def format(self, data): body = \"\" body += self.format_metadata(data) body", "no_colors=True)] if getattr(TestType, test[\"test\"][\"test_type\"]) > TestType.Test: for t in tests[idx + 1:]: flags", "(f'\\n<p class=\"copyright\">\\n' f'{copyright(data[\"company\"][\"name\"])}\\n' \"</p>\\n\") def format_metadata(self, data): metadata = data[\"metadata\"] s = (", "\"\" def format(self, data): body = \"\" body += self.format_metadata(data) body += self.format_summary(data)", "'</div>\\n' return s def format_statistics(self, data): counts = data[\"counts\"] result_map = { \"OK\":", "= test[\"result\"][\"message_time\"] messages = [format_test(test[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)] if getattr(TestType, test[\"test\"][\"test_type\"]) >", "click document.querySelectorAll('.requirement').forEach( function(item){ item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show'); item.children[0].classList.toggle('active'); }); }); // Toggle test procedure", "spec in specs]) d[\"title\"] = title d[\"requirements\"] = self.add_tests(requirements, results) d[\"metadata\"] = self.metadata(results)", "return { \"date\": time.time(), 
\"version\": __version__, } def requirements(self, spec_names, path, results): _requirements", "% {\"data\": data} + \"</p>\\n\" def format_confidential(self, data): if not data[\"company\"].get(\"confidential\"): return \"\"", "\"color-ok\", \"unsatisfied\": \"color-fail\", \"untested\": \"color-error\" } def format_logo(self, data): if not data[\"company\"].get(\"logo\"): return", "use this file except in compliance with the License. # You may obtain", "spec[\"specification_name\"]: matched = True break if not matched: continue _specs.append(spec) for req in", "} def format_logo(self, data): if not data[\"company\"].get(\"logo\"): return \"\" data = base64.b64encode(data[\"company\"][\"logo\"]).decode(\"utf-8\") return", "parser.add_argument(\"requirements\", metavar=\"requirements\", type=partial(argtype.path, special=[\"-\"]), help=\"requirements source file, default: '-' (from input log)\", nargs=\"?\",", "break if t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True)) messages.append(format_result(test[\"result\"], no_colors=True)) test[\"messages\"]", "base64 import threading import importlib.util from datetime import datetime from functools import partial", "[\"<span></span>\", \"Units\"] + [f'<span class=\"result result-{k.lower()}\">{v}</span>' for k, v in result_map.items()] ) +", "results): return { \"date\": time.time(), \"version\": __version__, } def requirements(self, spec_names, path, results):", "default: '-' (from input log)\", nargs=\"?\", default=\"-\") parser.add_argument(\"input\", metavar=\"input\", type=argtype.logfile(\"r\", bufsize=1, encoding=\"utf-8\"), nargs=\"?\",", "{ \"header\": [\"Requirement\", \"Tests\"], \"rows\": [], } return table def metadata(self, results): return", "f'\\n<div markdown=\"1\" class=\"requirement-description hidden\">\\n{description}\\n</div>' 
for test in r[\"tests\"]: result = test[\"result\"] cls =", "def format_summary(self, data): counts = data[\"counts\"] def template(value, title, color): return ( f'<div", "\"unsatisfied\": \"\\u2718\", \"untested\": \"\\u270E\" } icon_colors = { \"satisfied\": \"color-ok\", \"unsatisfied\": \"color-fail\", \"untested\":", "testflows._core.transform.log.report.totals import Counts from testflows._core.objects import Requirement logo = '<img class=\"logo\" src=\"data:image/png;base64,%(data)s\" alt=\"logo\"/>'", "def format_confidential(self, data): if not data[\"company\"].get(\"confidential\"): return \"\" return f'\\n<p class=\"confidential\">Document status -", "for which to generate coverage report\" \", default: include all specifications. Only a", "d[\"company\"] = self.company(args) counts = d[\"counts\"] return d def generate(self, formatter, results, args):", "\"||\\n\" s += \"||\" + \"||\".join([f\"<center>{i}</center>\" for i in [\"**Requirements**\", str(counts.units), str(counts.satisfied), str(counts.unsatisfied),", "return d def data(self, source, results, args): d = dict() specs, requirements =", "data): body = \"\" body += self.format_metadata(data) body += self.format_summary(data) body += self.format_statistics(data)", "default=None): tests = list(result[\"tests\"].values()) if not tests: return default test = tests[0][\"test\"] for", "one or more specifications for which to generate coverage report\" \", default: include", "can be specified.\" )) parser.set_defaults(func=cls()) def get_attribute(self, result, name, default=None): tests = list(result[\"tests\"].values())", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "testflows._core.utils.string import title as make_title from testflows._core.transform.log.report.totals import Counts from testflows._core.objects import Requirement", ")) parser.set_defaults(func=cls()) def get_attribute(self, result, name, default=None): tests = list(result[\"tests\"].values()) if 
not tests:", "t[\"test\"][\"test_type\"]) >= TestType.Test \\ and t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True))", "not r[\"tests\"]: s += f'\\n<div class=\"no-tests\">\\n<span class=\"result-inline\">\\u270E</span>\\nNo tests\\n</div>' s += \"\\n\" return s", "table = { \"header\": [\"Requirement\", \"Tests\"], \"rows\": [], } return table def metadata(self,", "spec[\"specification_requirements\"]: _requirements[req[\"name\"]] = {\"requirement\": Requirement(**req), \"tests\": []} else: spec = importlib.util.spec_from_file_location(\"requirements\", path) module", "table(self, results): table = { \"header\": [\"Requirement\", \"Tests\"], \"rows\": [], } return table", "= test[\"test\"][\"message_time\"] ended = test[\"result\"][\"message_time\"] messages = [format_test(test[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)] if", "[\"**Requirements**\", str(counts.units), str(counts.satisfied), str(counts.unsatisfied), str(counts.untested)]]) + \"||\\n\" return s + \"\\n\" def format_table(self,", "if args.confidential: d[\"confidential\"] = True if args.logo: d[\"logo\"] = args.logo.read() return d def", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "import sys import json import time import base64 import threading import importlib.util from", "data[\"company\"].get(\"confidential\"): return \"\" return f'\\n<p class=\"confidential\">Document status - Confidential</p>\\n' def format_copyright(self, data): if", "'untested'\", choices=[\"satisfied\", \"unsatisfied\", \"untested\"], default=[\"satisfied\", \"unsatisfied\", \"untested\"]) parser.add_argument(\"--input-link\", metavar=\"attribute\", help=\"attribute that is used", "test[\"test\"][\"test_type\"]) > TestType.Test: for t in tests[idx + 1:]: flags = Flags(t[\"test\"][\"test_flags\"]) if", 
"tests[idx + 1:]: flags = Flags(t[\"test\"][\"test_flags\"]) if flags & SKIP and settings.show_skipped is", "s += template(f\"{counts.untested / float(counts.units) * 100:.0f}\", \"Untested\", \"orange\") s += '</div>\\n' return", "+= '<div class=\"chart\">' if counts.satisfied > 0: s += template(f\"{counts.satisfied / float(counts.units) *", "be specified.\" )) parser.set_defaults(func=cls()) def get_attribute(self, result, name, default=None): tests = list(result[\"tests\"].values()) if", "tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True)) else: for t in tests[idx + 1:]: flags =", "self.counts(d[\"requirements\"]) d[\"company\"] = self.company(args) counts = d[\"counts\"] return d def generate(self, formatter, results,", "copyright notice\", type=str) parser.add_argument(\"--confidential\", help=\"mark as confidential\", action=\"store_true\") parser.add_argument(\"--logo\", metavar=\"path\", type=argtype.file(\"rb\"), help='use logo", "\"\\n## Summary\\n\" if counts.units <= 0: s += \"No tests\" else: s +=", "return \"\" return f'\\n<p class=\"confidential\">Document status - Confidential</p>\\n' def format_copyright(self, data): if not", "with the License. 
# You may obtain a copy of the License at", "[\"Requirement\", \"Tests\"], \"rows\": [], } return table def metadata(self, results): return { \"date\":", "datetime import datetime from functools import partial import testflows.settings as settings import testflows._core.cli.arg.type", "format, default: md (Markdown)\", choices=[\"md\"], default=\"md\") parser.add_argument(\"--copyright\", metavar=\"name\", help=\"add copyright notice\", type=str) parser.add_argument(\"--confidential\",", "law or agreed to in writing, software # distributed under the License is", "in vars(module).items(): if not isinstance(value, Requirement): continue _requirements[value.name] = {\"requirement\": value, \"tests\": []}", "help=\"input log, default: stdin\", default=\"-\") parser.add_argument(\"output\", metavar=\"output\", type=argtype.file(\"w\", bufsize=1, encoding=\"utf-8\"), nargs=\"?\", help='output file,", "testflows.replace(\"[\", \"\").replace(\"]\", \"\") FailResults = [\"Fail\", \"Error\", \"Null\"] XoutResults = [\"XOK\", \"XFail\", \"XError\",", "or more specifications for which to generate coverage report\" \", default: include all", "\"OK\": \"Satisfied\", \"Fail\": \"Unsatisfied\", \"Error\": \"Untested\" } s = \"\\n\\n## Statistics\\n\" s +=", "# that include all specification names title = args.title if title is None", "help=\"verification status. Choices: 'satisfied', 'unsatisfied', 'untested'\", choices=[\"satisfied\", \"unsatisfied\", \"untested\"], default=[\"satisfied\", \"unsatisfied\", \"untested\"]) parser.add_argument(\"--input-link\",", "\"||\" + \"||\".join( [\"<span></span>\", \"Units\"] + [f'<span class=\"result result-{k.lower()}\">{v}</span>' for k, v in", "nargs=\"?\", help='output file, default: stdout', default=\"-\") parser.add_argument(\"--show\", metavar=\"status\", type=str, nargs=\"+\", help=\"verification status. 
Choices:", "= Flags(test[\"test\"][\"test_flags\"]) if flags & SKIP and settings.show_skipped is False: continue result =", "def requirements(self, spec_names, path, results): _requirements = {} _specs = [] if path", "in compliance with the License. # You may obtain a copy of the", "= importlib.util.module_from_spec(spec) spec.loader.exec_module(module) for name, value in vars(module).items(): if not isinstance(value, Requirement): continue", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "\"Satisfied\", \"green\") if counts.unsatisfied > 0: s += template(f\"{counts.unsatisfied / float(counts.units) * 100:.0f}\",", "def format_metadata(self, data): metadata = data[\"metadata\"] s = ( \"\\n\\n\" f\"||**Date**||{localfromtimestamp(metadata['date']):%b %d, %Y", "part of the name can be specified.\" )) parser.set_defaults(func=cls()) def get_attribute(self, result, name,", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "req[\"status\"] = \"unsatisfied\" return counts def company(self, args): d = {} if args.copyright:", "tests, results[\"tests_by_parent\"], results[\"tests_by_id\"])) return requirements def counts(self, requirements): counts = Counts(\"requirements\", *([0] *", "report.\", formatter_class=HelpFormatter) parser.add_argument(\"requirements\", metavar=\"requirements\", type=partial(argtype.path, special=[\"-\"]), help=\"requirements source file, default: '-' (from input", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "1:]: flags = Flags(t[\"test\"][\"test_flags\"]) if flags & SKIP and settings.show_skipped is False: continue", "to the input log, default: job.url\", type=str, default=\"job.url\") parser.add_argument(\"--format\", metavar=\"type\", type=str, help=\"output format,", "[<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]' testflows_em = testflows.replace(\"[\", 
\"\").replace(\"]\", \"\") FailResults = [\"Fail\", \"Error\", \"Null\"]", "test in enumerate(tests): flags = Flags(test[\"test\"][\"test_flags\"]) if flags & SKIP and settings.show_skipped is", "testflows._core.utils.timefuncs import localfromtimestamp, strftimedelta from testflows._core.utils.string import title as make_title from testflows._core.transform.log.report.totals import", "if flags & SKIP and settings.show_skipped is False: continue if t[\"test\"][\"message_time\"] > ended:", "format(self, data): body = \"\" body += self.format_metadata(data) body += self.format_summary(data) body +=", "False: continue result = test[\"result\"] for requirement in test[\"test\"][\"requirements\"]: if requirement[\"requirement_name\"] in requirements:", "if args.logo: d[\"logo\"] = args.logo.read() return d def data(self, source, results, args): d", "+= template(f\"{counts.untested / float(counts.units) * 100:.0f}\", \"Untested\", \"orange\") s += '</div>\\n' return s", "description on click document.querySelectorAll('.requirement').forEach( function(item){ item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show'); item.children[0].classList.toggle('active'); }); }); // Toggle", "tests_by_parent, tests_by_id): started = test[\"test\"][\"message_time\"] ended = test[\"result\"][\"message_time\"] messages = [format_test(test[\"test\"], \"\", tests_by_parent,", "return table def metadata(self, results): return { \"date\": time.time(), \"version\": __version__, } def", "results[\"specifications\"]: if spec_names: matched = False for name in spec_names: if name in", "results) # if custom title was not specified generate a title # that", "from testflows._core.cli.arg.handlers.report.copyright import copyright from testflows._core.transform.log.pipeline import ResultsLogPipeline from testflows._core.transform.log.short import format_test, format_result", "from datetime import datetime from functools import partial import testflows.settings as 
settings import", "\"Tests\"], \"rows\": [], } return table def metadata(self, results): return { \"date\": time.time(),", "TestType.Test: for t in tests[idx + 1:]: flags = Flags(t[\"test\"][\"test_flags\"]) if flags &", "if description: s += f'\\n<div markdown=\"1\" class=\"requirement-description hidden\">\\n{description}\\n</div>' for test in r[\"tests\"]: result", "os import sys import json import time import base64 import threading import importlib.util", "sys import json import time import base64 import threading import importlib.util from datetime", "encoding=\"utf-8\"), nargs=\"?\", help=\"input log, default: stdin\", default=\"-\") parser.add_argument(\"output\", metavar=\"output\", type=argtype.file(\"w\", bufsize=1, encoding=\"utf-8\"), nargs=\"?\",", "type=partial(argtype.path, special=[\"-\"]), help=\"requirements source file, default: '-' (from input log)\", nargs=\"?\", default=\"-\") parser.add_argument(\"input\",", "which to generate coverage report\" \", default: include all specifications. Only a unique", "( \"\\n\\n\" f\"||**Date**||{localfromtimestamp(metadata['date']):%b %d, %Y %-H:%M}||\\n\" f'||**Framework**||' f'{testflows} {metadata[\"version\"]}||\\n' ) return s +", "f'<span>{value}%</span>' f'<span class=\"title\">{title}</span>' '<div class=\"slice\">' '<div class=\"bar\"></div>' '<div class=\"fill\"></div>' '</div>' '</div>\\n') s =", "d[\"counts\"] = self.counts(d[\"requirements\"]) d[\"company\"] = self.company(args) counts = d[\"counts\"] return d def generate(self,", "status. 
Choices: 'satisfied', 'unsatisfied', 'untested'\", choices=[\"satisfied\", \"unsatisfied\", \"untested\"], default=[\"satisfied\", \"unsatisfied\", \"untested\"]) parser.add_argument(\"--input-link\", metavar=\"attribute\",", "Counts from testflows._core.objects import Requirement logo = '<img class=\"logo\" src=\"data:image/png;base64,%(data)s\" alt=\"logo\"/>' testflows =", "that is used as a link to the input log, default: job.url\", type=str,", "units self.satisfied = satisfied self.unsatisfied = unsatisfied self.untested = untested def __bool__(self): return", "[ClickHouse]: https://clickhouse.yandex <script> %(script)s </script> \"\"\" script = \"\"\" window.onload = function(){ //", "r[\"tests\"]: result = test[\"result\"] cls = result[\"result_type\"].lower() s += f'\\n<div class=\"test\"><span class=\"result result-inline", "= args.output output.write( formatter.format(self.data(args.requirements, results, args)) ) output.write(\"\\n\") def handle(self, args): results =", "description = r[\"requirement\"].description.replace(\"\\\\n\",\"\\n\") if description: s += f'\\n<div markdown=\"1\" class=\"requirement-description hidden\">\\n{description}\\n</div>' for test", "class=\"test\"><span class=\"result result-inline result-{cls}\">{result[\"result_type\"]}</span><span class=\"time time-inline\">{strftimedelta(result[\"message_rtime\"])}</span>{test[\"test\"][\"test_name\"]}</div>' s += f'\\n<div class=\"test-procedure hidden\">\\n```testflows\\n{test[\"messages\"]}\\n```\\n</div>' if not", "title d[\"requirements\"] = self.add_tests(requirements, results) d[\"metadata\"] = self.metadata(results) d[\"counts\"] = self.counts(d[\"requirements\"]) d[\"company\"] =", "\"orange\") s += '</div>\\n' return s def format_statistics(self, data): counts = data[\"counts\"] result_map", "class=\"time time-inline\">{strftimedelta(result[\"message_rtime\"])}</span>{test[\"test\"][\"test_name\"]}</div>' s += f'\\n<div class=\"test-procedure 
hidden\">\\n```testflows\\n{test[\"messages\"]}\\n```\\n</div>' if not r[\"tests\"]: s += f'\\n<div", "return attr[\"attribute_value\"] return default def table(self, results): table = { \"header\": [\"Requirement\", \"Tests\"],", "s += '</div>\\n' return s def format_statistics(self, data): counts = data[\"counts\"] result_map =", "used as a link to the input log, default: job.url\", type=str, default=\"job.url\") parser.add_argument(\"--format\",", "testflows = '<span class=\"testflows-logo\"></span> [<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]' testflows_em = testflows.replace(\"[\", \"\").replace(\"]\", \"\") FailResults", "d[\"counts\"] return d def generate(self, formatter, results, args): output = args.output output.write( formatter.format(self.data(args.requirements,", "governing permissions and # limitations under the License. import os import sys import", "= dict() specs, requirements = self.requirements(args.only, source, results) # if custom title was", "> 0: s += template(f\"{counts.untested / float(counts.units) * 100:.0f}\", \"Untested\", \"orange\") s +=", "body += self.format_statistics(data) body += self.format_table(data) return template.strip() % { \"logo\": self.format_logo(data), \"confidential\":", "return \"\" def format(self, data): body = \"\" body += self.format_metadata(data) body +=", "logo = '<img class=\"logo\" src=\"data:image/png;base64,%(data)s\" alt=\"logo\"/>' testflows = '<span class=\"testflows-logo\"></span> [<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]'", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "if satisfied: counts.satisfied += 1 req[\"status\"] = \"satisfied\" else: counts.unsatisfied += 1 req[\"status\"]", "}); }); } \"\"\" class Formatter: utf_icons = { \"satisfied\": \"\\u2714\", \"unsatisfied\": \"\\u2718\",", "{ \"logo\": self.format_logo(data), \"confidential\": self.format_confidential(data), \"copyright\": 
self.format_copyright(data), \"body\": body, \"script\": script, \"title\": self.format_title(data)", "str(counts.satisfied), str(counts.unsatisfied), str(counts.untested)]]) + \"||\\n\" return s + \"\\n\" def format_table(self, data): reqs", "return self.units > 0 class Handler(HandlerBase): @classmethod def add_command(cls, commands): parser = commands.add_parser(\"coverage\",", "= \"satisfied\" else: counts.unsatisfied += 1 req[\"status\"] = \"unsatisfied\" return counts def company(self,", "[<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]: https://testflows.com [ClickHouse]: https://clickhouse.yandex <script> %(script)s </script> \"\"\" script = \"\"\"", "return s + \"\\n\" def format_title(self, data): if data[\"title\"]: return \"<br>\" + make_title(data[\"title\"])", "!= \"OK\": satisfied = False if satisfied: counts.satisfied += 1 req[\"status\"] = \"satisfied\"", "default=\"-\") parser.add_argument(\"output\", metavar=\"output\", type=argtype.file(\"w\", bufsize=1, encoding=\"utf-8\"), nargs=\"?\", help='output file, default: stdout', default=\"-\") parser.add_argument(\"--show\",", "_requirements) def add_test_messages(self, test, idx, tests, tests_by_parent, tests_by_id): started = test[\"test\"][\"message_time\"] ended =", "module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) for name, value in vars(module).items(): if not isinstance(value, Requirement):", "+= \"\\n\" return s + \"\\n\" def format_title(self, data): if data[\"title\"]: return \"<br>\"", "d def generate(self, formatter, results, args): output = args.output output.write( formatter.format(self.data(args.requirements, results, args))", "req in spec[\"specification_requirements\"]: _requirements[req[\"name\"]] = {\"requirement\": Requirement(**req), \"tests\": []} else: spec = importlib.util.spec_from_file_location(\"requirements\",", "+= f'\\n<div class=\"test\"><span class=\"result result-inline 
result-{cls}\">{result[\"result_type\"]}</span><span class=\"time time-inline\">{strftimedelta(result[\"message_rtime\"])}</span>{test[\"test\"][\"test_name\"]}</div>' s += f'\\n<div class=\"test-procedure hidden\">\\n```testflows\\n{test[\"messages\"]}\\n```\\n</div>'", "title as make_title from testflows._core.transform.log.report.totals import Counts from testflows._core.objects import Requirement logo =", "f'<span class=\"title\">{title}</span>' '<div class=\"slice\">' '<div class=\"bar\"></div>' '<div class=\"fill\"></div>' '</div>' '</div>\\n') s = \"\\n##", "this file except in compliance with the License. # You may obtain a", "t in tests[idx + 1:]: flags = Flags(t[\"test\"][\"test_flags\"]) if flags & SKIP and", "time-inline\">{strftimedelta(result[\"message_rtime\"])}</span>{test[\"test\"][\"test_name\"]}</div>' s += f'\\n<div class=\"test-procedure hidden\">\\n```testflows\\n{test[\"messages\"]}\\n```\\n</div>' if not r[\"tests\"]: s += f'\\n<div class=\"no-tests\">\\n<span", "% { \"logo\": self.format_logo(data), \"confidential\": self.format_confidential(data), \"copyright\": self.format_copyright(data), \"body\": body, \"script\": script, \"title\":", "\"color-fail\", \"untested\": \"color-error\" } def format_logo(self, data): if not data[\"company\"].get(\"logo\"): return \"\" data", "settings.show_skipped is False: continue if t[\"test\"][\"message_time\"] > ended: break if t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\",", "\"<br>\".join([spec[\"specification_name\"] for spec in specs]) d[\"title\"] = title d[\"requirements\"] = self.add_tests(requirements, results) d[\"metadata\"]", "names title = args.title if title is None and specs: title = \"<br>\".join([spec[\"specification_name\"]", "for r in reqs.values(): s += f'\\n<section class=\"requirement\"><span class=\"requirement-inline\"><i class=\"utf-icon 
{self.icon_colors[r[\"status\"]]}\">{self.utf_icons[r[\"status\"]]}</i>{r[\"requirement\"].name}</span></section>' description =", "Generated by {testflows} Open-Source Test Framework [<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]: https://testflows.com [ClickHouse]: https://clickhouse.yandex <script>", "= base64.b64encode(data[\"company\"][\"logo\"]).decode(\"utf-8\") return '\\n<p>' + logo % {\"data\": data} + \"</p>\\n\" def format_confidential(self,", "choices=[\"md\"], default=\"md\") parser.add_argument(\"--copyright\", metavar=\"name\", help=\"add copyright notice\", type=str) parser.add_argument(\"--confidential\", help=\"mark as confidential\", action=\"store_true\")", "+= template(f\"{counts.satisfied / float(counts.units) * 100:.0f}\", \"Satisfied\", \"green\") if counts.unsatisfied > 0: s", "output = args.output output.write( formatter.format(self.data(args.requirements, results, args)) ) output.write(\"\\n\") def handle(self, args): results", "}); // Toggle test procedure on click document.querySelectorAll('.test').forEach( function(item){ item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show'); item.classList.toggle('active');", "\"Fail\": \"Unsatisfied\", \"Error\": \"Untested\" } s = \"\\n\\n## Statistics\\n\" s += \"||\" +", "def format_title(self, data): if data[\"title\"]: return \"<br>\" + make_title(data[\"title\"]) return \"\" def format(self,", "= args.title if title is None and specs: title = \"<br>\".join([spec[\"specification_name\"] for spec", "default test = tests[0][\"test\"] for attr in test[\"attributes\"]: if attr[\"attribute_name\"] == name: return", "title = args.title if title is None and specs: title = \"<br>\".join([spec[\"specification_name\"] for", "\"||\" + \"||\".join([f\"<center>{i}</center>\" for i in [\"**Requirements**\", str(counts.units), str(counts.satisfied), str(counts.unsatisfied), str(counts.untested)]]) + \"||\\n\"", "= False if satisfied: 
counts.satisfied += 1 req[\"status\"] = \"satisfied\" else: counts.unsatisfied +=", "'<img class=\"logo\" src=\"data:image/png;base64,%(data)s\" alt=\"logo\"/>' testflows = '<span class=\"testflows-logo\"></span> [<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]' testflows_em =", "output.write(\"\\n\") def handle(self, args): results = {} formatter = Formatter() ResultsLogPipeline(args.input, results).run() self.generate(formatter,", "(.png)') parser.add_argument(\"--title\", metavar=\"name\", help=\"custom title\", type=str) parser.add_argument(\"--only\", metavar=\"name\", type=str, default=[], nargs=\"+\", help=(\"name of", "for k, v in result_map.items()] ) + \"||\\n\" s += \"||\" + \"||\".join([f\"<center>{i}</center>\"", "[\"XOK\", \"XFail\", \"XError\", \"XNull\"] template = f\"\"\" <section class=\"clearfix\">%(logo)s%(confidential)s%(copyright)s</section> --- # Requirements Coverage", "continue _requirements[value.name] = {\"requirement\": value, \"tests\": []} return (_specs, _requirements) def add_test_messages(self, test,", "f'\\n<section class=\"requirement\"><span class=\"requirement-inline\"><i class=\"utf-icon {self.icon_colors[r[\"status\"]]}\">{self.utf_icons[r[\"status\"]]}</i>{r[\"requirement\"].name}</span></section>' description = r[\"requirement\"].description.replace(\"\\\\n\",\"\\n\") if description: s += f'\\n<div", "item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show'); item.children[0].classList.toggle('active'); }); }); // Toggle test procedure on click document.querySelectorAll('.test').forEach(", "result-{cls}\">{result[\"result_type\"]}</span><span class=\"time time-inline\">{strftimedelta(result[\"message_rtime\"])}</span>{test[\"test\"][\"test_name\"]}</div>' s += f'\\n<div class=\"test-procedure hidden\">\\n```testflows\\n{test[\"messages\"]}\\n```\\n</div>' if not r[\"tests\"]: s +=", "parser.add_argument(\"--only\", metavar=\"name\", type=str, default=[], 
nargs=\"+\", help=(\"name of one or more specifications for which", "data): counts = data[\"counts\"] def template(value, title, color): return ( f'<div class=\"c100 p{value}", "= list(results[\"tests\"].values()) for i, test in enumerate(tests): flags = Flags(test[\"test\"][\"test_flags\"]) if flags &", "default=\"-\") parser.add_argument(\"--show\", metavar=\"status\", type=str, nargs=\"+\", help=\"verification status. Choices: 'satisfied', 'unsatisfied', 'untested'\", choices=[\"satisfied\", \"unsatisfied\",", "no_colors=True)) messages.append(format_result(test[\"result\"], no_colors=True)) test[\"messages\"] = \"\".join(messages) return test def add_tests(self, requirements, results): tests", "by {testflows} Open-Source Test Framework [<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]: https://testflows.com [ClickHouse]: https://clickhouse.yandex <script> %(script)s", "_requirements = {} _specs = [] if path == \"-\": for spec in", "= \"\" body += self.format_metadata(data) body += self.format_summary(data) body += self.format_statistics(data) body +=", "for i in [\"**Requirements**\", str(counts.units), str(counts.satisfied), str(counts.unsatisfied), str(counts.untested)]]) + \"||\\n\" return s +", "test[\"test\"][\"requirements\"]: if requirement[\"requirement_name\"] in requirements: requirements[requirement[\"requirement_name\"]][\"tests\"].append(self.add_test_messages(test, i, tests, results[\"tests_by_parent\"], results[\"tests_by_id\"])) return requirements def", "tests: return default test = tests[0][\"test\"] for attr in test[\"attributes\"]: if attr[\"attribute_name\"] ==", "source file, default: '-' (from input log)\", nargs=\"?\", default=\"-\") parser.add_argument(\"input\", metavar=\"input\", type=argtype.logfile(\"r\", bufsize=1,", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "markdown=\"1\" class=\"requirement-description hidden\">\\n{description}\\n</div>' for 
test in r[\"tests\"]: result = test[\"result\"] cls = result[\"result_type\"].lower()", "{\"requirement\": value, \"tests\": []} return (_specs, _requirements) def add_test_messages(self, test, idx, tests, tests_by_parent,", "= '<img class=\"logo\" src=\"data:image/png;base64,%(data)s\" alt=\"logo\"/>' testflows = '<span class=\"testflows-logo\"></span> [<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]' testflows_em", "import ResultsLogPipeline from testflows._core.transform.log.short import format_test, format_result from testflows._core.utils.timefuncs import localfromtimestamp, strftimedelta from", "dict() specs, requirements = self.requirements(args.only, source, results) # if custom title was not", "add_command(cls, commands): parser = commands.add_parser(\"coverage\", help=\"requirements coverage report\", epilog=epilog(), description=\"Generate requirements coverage report.\",", "// Toggle requirement description on click document.querySelectorAll('.requirement').forEach( function(item){ item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show'); item.children[0].classList.toggle('active'); });", "for t in tests[idx + 1:]: flags = Flags(t[\"test\"][\"test_flags\"]) if flags & SKIP", "required by applicable law or agreed to in writing, software # distributed under", "import Requirement logo = '<img class=\"logo\" src=\"data:image/png;base64,%(data)s\" alt=\"logo\"/>' testflows = '<span class=\"testflows-logo\"></span> [<span", "not data[\"company\"].get(\"name\"): return \"\" return (f'\\n<p class=\"copyright\">\\n' f'{copyright(data[\"company\"][\"name\"])}\\n' \"</p>\\n\") def format_metadata(self, data): metadata", "\"Untested\", \"orange\") s += '</div>\\n' return s def format_statistics(self, data): counts = data[\"counts\"]", "def add_tests(self, requirements, results): tests = list(results[\"tests\"].values()) for i, test in enumerate(tests): flags", "in requirements.values(): counts.units 
+= 1 tests = req[\"tests\"] if not tests: counts.untested +=", "requirements def counts(self, requirements): counts = Counts(\"requirements\", *([0] * 4)) for req in", "tests_by_id, no_colors=True)] if getattr(TestType, test[\"test\"][\"test_type\"]) > TestType.Test: for t in tests[idx + 1:]:", "file, default: stdout', default=\"-\") parser.add_argument(\"--show\", metavar=\"status\", type=str, nargs=\"+\", help=\"verification status. Choices: 'satisfied', 'unsatisfied',", "XoutResults = [\"XOK\", \"XFail\", \"XError\", \"XNull\"] template = f\"\"\" <section class=\"clearfix\">%(logo)s%(confidential)s%(copyright)s</section> --- #", "counts.satisfied > 0: s += template(f\"{counts.satisfied / float(counts.units) * 100:.0f}\", \"Satisfied\", \"green\") if", "SKIP from testflows._core.testtype import TestType from testflows._core.cli.arg.common import epilog from testflows._core.cli.arg.common import HelpFormatter", "\"copyright\": self.format_copyright(data), \"body\": body, \"script\": script, \"title\": self.format_title(data) } class Counts(object): def __init__(self,", "specified generate a title # that include all specification names title = args.title", "file, default: '-' (from input log)\", nargs=\"?\", default=\"-\") parser.add_argument(\"input\", metavar=\"input\", type=argtype.logfile(\"r\", bufsize=1, encoding=\"utf-8\"),", "from testflows._core.transform.log.short import format_test, format_result from testflows._core.utils.timefuncs import localfromtimestamp, strftimedelta from testflows._core.utils.string import", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "\"\", tests_by_parent, tests_by_id, no_colors=True)] if getattr(TestType, test[\"test\"][\"test_type\"]) > TestType.Test: for t in tests[idx", "+= f'\\n<div class=\"test-procedure hidden\">\\n```testflows\\n{test[\"messages\"]}\\n```\\n</div>' if not r[\"tests\"]: s += f'\\n<div class=\"no-tests\">\\n<span class=\"result-inline\">\\u270E</span>\\nNo 
tests\\n</div>'", "test def add_tests(self, requirements, results): tests = list(results[\"tests\"].values()) for i, test in enumerate(tests):", "help=\"output format, default: md (Markdown)\", choices=[\"md\"], default=\"md\") parser.add_argument(\"--copyright\", metavar=\"name\", help=\"add copyright notice\", type=str)", "as make_title from testflows._core.transform.log.report.totals import Counts from testflows._core.objects import Requirement logo = '<img", "messages.append(format_test(t[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True)) else: for t in tests[idx +", "= [] if path == \"-\": for spec in results[\"specifications\"]: if spec_names: matched", "1 tests = req[\"tests\"] if not tests: counts.untested += 1 req[\"status\"] = \"untested\"", "= [\"Fail\", \"Error\", \"Null\"] XoutResults = [\"XOK\", \"XFail\", \"XError\", \"XNull\"] template = f\"\"\"", "default: md (Markdown)\", choices=[\"md\"], default=\"md\") parser.add_argument(\"--copyright\", metavar=\"name\", help=\"add copyright notice\", type=str) parser.add_argument(\"--confidential\", help=\"mark", "f'\\n<div class=\"no-tests\">\\n<span class=\"result-inline\">\\u270E</span>\\nNo tests\\n</div>' s += \"\\n\" return s + \"\\n\" def format_title(self,", "i, test in enumerate(tests): flags = Flags(test[\"test\"][\"test_flags\"]) if flags & SKIP and settings.show_skipped", "list(result[\"tests\"].values()) if not tests: return default test = tests[0][\"test\"] for attr in test[\"attributes\"]:", "else: spec = importlib.util.spec_from_file_location(\"requirements\", path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) for name, value in", "template(f\"{counts.satisfied / float(counts.units) * 100:.0f}\", \"Satisfied\", \"green\") if counts.unsatisfied > 0: s +=", "results[\"tests_by_id\"])) return requirements def counts(self, requirements): counts = Counts(\"requirements\", *([0] * 4)) for", 
"localfromtimestamp, strftimedelta from testflows._core.utils.string import title as make_title from testflows._core.transform.log.report.totals import Counts from", "as a link to the input log, default: job.url\", type=str, default=\"job.url\") parser.add_argument(\"--format\", metavar=\"type\",", "data[\"counts\"] def template(value, title, color): return ( f'<div class=\"c100 p{value} {color} smaller-title\">' f'<span>{value}%</span>'", "class=\"logo-flows\">Flows</span>]: https://testflows.com [ClickHouse]: https://clickhouse.yandex <script> %(script)s </script> \"\"\" script = \"\"\" window.onload =", "<section class=\"clearfix\">%(logo)s%(confidential)s%(copyright)s</section> --- # Requirements Coverage Report%(title)s %(body)s --- Generated by {testflows} Open-Source", "def __bool__(self): return self.units > 0 class Handler(HandlerBase): @classmethod def add_command(cls, commands): parser", "<script> %(script)s </script> \"\"\" script = \"\"\" window.onload = function(){ // Toggle requirement", "\"Units\"] + [f'<span class=\"result result-{k.lower()}\">{v}</span>' for k, v in result_map.items()] ) + \"||\\n\"", "self.format_table(data) return template.strip() % { \"logo\": self.format_logo(data), \"confidential\": self.format_confidential(data), \"copyright\": self.format_copyright(data), \"body\": body,", "__init__(self, name, units, satisfied, unsatisfied, untested): self.name = name self.units = units self.satisfied", "if not r[\"tests\"]: s += f'\\n<div class=\"no-tests\">\\n<span class=\"result-inline\">\\u270E</span>\\nNo tests\\n</div>' s += \"\\n\" return", "coverage report\" \", default: include all specifications. Only a unique part of the", "tests: result = test[\"result\"] if result[\"result_type\"] != \"OK\": satisfied = False if satisfied:", "parser.add_argument(\"--show\", metavar=\"status\", type=str, nargs=\"+\", help=\"verification status. 
Choices: 'satisfied', 'unsatisfied', 'untested'\", choices=[\"satisfied\", \"unsatisfied\", \"untested\"],", "import __version__ from testflows._core.flags import Flags, SKIP from testflows._core.testtype import TestType from testflows._core.cli.arg.common", "default: stdin\", default=\"-\") parser.add_argument(\"output\", metavar=\"output\", type=argtype.file(\"w\", bufsize=1, encoding=\"utf-8\"), nargs=\"?\", help='output file, default: stdout',", "= { \"satisfied\": \"\\u2714\", \"unsatisfied\": \"\\u2718\", \"untested\": \"\\u270E\" } icon_colors = { \"satisfied\":", "Inc. # TestFlows.com Open-Source Software Testing Framework (http://testflows.com) # # Licensed under the", "import testflows._core.cli.arg.type as argtype from testflows._core import __version__ from testflows._core.flags import Flags, SKIP", "title is None and specs: title = \"<br>\".join([spec[\"specification_name\"] for spec in specs]) d[\"title\"]", "4)) for req in requirements.values(): counts.units += 1 tests = req[\"tests\"] if not", "return d def generate(self, formatter, results, args): output = args.output output.write( formatter.format(self.data(args.requirements, results,", "the License. 
import os import sys import json import time import base64 import", "%(body)s --- Generated by {testflows} Open-Source Test Framework [<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]: https://testflows.com [ClickHouse]:", "# you may not use this file except in compliance with the License.", "None and specs: title = \"<br>\".join([spec[\"specification_name\"] for spec in specs]) d[\"title\"] = title", "metavar=\"name\", help=\"custom title\", type=str) parser.add_argument(\"--only\", metavar=\"name\", type=str, default=[], nargs=\"+\", help=(\"name of one or", "idx, tests, tests_by_parent, tests_by_id): started = test[\"test\"][\"message_time\"] ended = test[\"result\"][\"message_time\"] messages = [format_test(test[\"test\"],", "from testflows._core.cli.arg.common import epilog from testflows._core.cli.arg.common import HelpFormatter from testflows._core.cli.arg.handlers.handler import Handler as", "{} if args.copyright: d[\"name\"] = args.copyright if args.confidential: d[\"confidential\"] = True if args.logo:", "is used as a link to the input log, default: job.url\", type=str, default=\"job.url\")", "def add_command(cls, commands): parser = commands.add_parser(\"coverage\", help=\"requirements coverage report\", epilog=epilog(), description=\"Generate requirements coverage", "results): table = { \"header\": [\"Requirement\", \"Tests\"], \"rows\": [], } return table def", "= title d[\"requirements\"] = self.add_tests(requirements, results) d[\"metadata\"] = self.metadata(results) d[\"counts\"] = self.counts(d[\"requirements\"]) d[\"company\"]", "self.satisfied = satisfied self.unsatisfied = unsatisfied self.untested = untested def __bool__(self): return self.units", "special=[\"-\"]), help=\"requirements source file, default: '-' (from input log)\", nargs=\"?\", default=\"-\") parser.add_argument(\"input\", metavar=\"input\",", "= \"\\n\\n## Statistics\\n\" s += \"||\" + \"||\".join( [\"<span></span>\", \"Units\"] + [f'<span 
class=\"result", "\"\"\" class Formatter: utf_icons = { \"satisfied\": \"\\u2714\", \"unsatisfied\": \"\\u2718\", \"untested\": \"\\u270E\" }", "stdin\", default=\"-\") parser.add_argument(\"output\", metavar=\"output\", type=argtype.file(\"w\", bufsize=1, encoding=\"utf-8\"), nargs=\"?\", help='output file, default: stdout', default=\"-\")", "return \"\" data = base64.b64encode(data[\"company\"][\"logo\"]).decode(\"utf-8\") return '\\n<p>' + logo % {\"data\": data} +", "s + \"\\n\" def format_title(self, data): if data[\"title\"]: return \"<br>\" + make_title(data[\"title\"]) return", "data[\"counts\"] result_map = { \"OK\": \"Satisfied\", \"Fail\": \"Unsatisfied\", \"Error\": \"Untested\" } s =", "results) d[\"metadata\"] = self.metadata(results) d[\"counts\"] = self.counts(d[\"requirements\"]) d[\"company\"] = self.company(args) counts = d[\"counts\"]", "License for the specific language governing permissions and # limitations under the License.", "title # that include all specification names title = args.title if title is", "args): d = {} if args.copyright: d[\"name\"] = args.copyright if args.confidential: d[\"confidential\"] =", "return default def table(self, results): table = { \"header\": [\"Requirement\", \"Tests\"], \"rows\": [],", "Flags, SKIP from testflows._core.testtype import TestType from testflows._core.cli.arg.common import epilog from testflows._core.cli.arg.common import", "counts = Counts(\"requirements\", *([0] * 4)) for req in requirements.values(): counts.units += 1", "\"License\"); # you may not use this file except in compliance with the", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "0: s += template(f\"{counts.satisfied / float(counts.units) * 100:.0f}\", \"Satisfied\", \"green\") if counts.unsatisfied >", "default=\"job.url\") parser.add_argument(\"--format\", metavar=\"type\", type=str, help=\"output format, default: md (Markdown)\", choices=[\"md\"], default=\"md\") 
parser.add_argument(\"--copyright\", metavar=\"name\",", "break if not matched: continue _specs.append(spec) for req in spec[\"specification_requirements\"]: _requirements[req[\"name\"]] = {\"requirement\":", "+ \"\\n\" def format_summary(self, data): counts = data[\"counts\"] def template(value, title, color): return", "Report%(title)s %(body)s --- Generated by {testflows} Open-Source Test Framework [<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]: https://testflows.com", "the specific language governing permissions and # limitations under the License. import os", "metavar=\"status\", type=str, nargs=\"+\", help=\"verification status. Choices: 'satisfied', 'unsatisfied', 'untested'\", choices=[\"satisfied\", \"unsatisfied\", \"untested\"], default=[\"satisfied\",", "result_map = { \"OK\": \"Satisfied\", \"Fail\": \"Unsatisfied\", \"Error\": \"Untested\" } s = \"\\n\\n##", "testflows._core.objects import Requirement logo = '<img class=\"logo\" src=\"data:image/png;base64,%(data)s\" alt=\"logo\"/>' testflows = '<span class=\"testflows-logo\"></span>", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "\"\" return f'\\n<p class=\"confidential\">Document status - Confidential</p>\\n' def format_copyright(self, data): if not data[\"company\"].get(\"name\"):", "in writing, software # distributed under the License is distributed on an \"AS", "copyright from testflows._core.transform.log.pipeline import ResultsLogPipeline from testflows._core.transform.log.short import format_test, format_result from testflows._core.utils.timefuncs import", "\"body\": body, \"script\": script, \"title\": self.format_title(data) } class Counts(object): def __init__(self, name, units,", "class=\"result-inline\">\\u270E</span>\\nNo tests\\n</div>' s += \"\\n\" return s + \"\\n\" def format_title(self, data): if", "in spec[\"specification_requirements\"]: _requirements[req[\"name\"]] = {\"requirement\": 
Requirement(**req), \"tests\": []} else: spec = importlib.util.spec_from_file_location(\"requirements\", path)", "value in vars(module).items(): if not isinstance(value, Requirement): continue _requirements[value.name] = {\"requirement\": value, \"tests\":", "ended = test[\"result\"][\"message_time\"] messages = [format_test(test[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)] if getattr(TestType, test[\"test\"][\"test_type\"])", "import Counts from testflows._core.objects import Requirement logo = '<img class=\"logo\" src=\"data:image/png;base64,%(data)s\" alt=\"logo\"/>' testflows", "class Handler(HandlerBase): @classmethod def add_command(cls, commands): parser = commands.add_parser(\"coverage\", help=\"requirements coverage report\", epilog=epilog(),", "{self.icon_colors[r[\"status\"]]}\">{self.utf_icons[r[\"status\"]]}</i>{r[\"requirement\"].name}</span></section>' description = r[\"requirement\"].description.replace(\"\\\\n\",\"\\n\") if description: s += f'\\n<div markdown=\"1\" class=\"requirement-description hidden\">\\n{description}\\n</div>' for", "specs]) d[\"title\"] = title d[\"requirements\"] = self.add_tests(requirements, results) d[\"metadata\"] = self.metadata(results) d[\"counts\"] =", "import threading import importlib.util from datetime import datetime from functools import partial import", "Framework [<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]: https://testflows.com [ClickHouse]: https://clickhouse.yandex <script> %(script)s </script> \"\"\" script =", "continue _specs.append(spec) for req in spec[\"specification_requirements\"]: _requirements[req[\"name\"]] = {\"requirement\": Requirement(**req), \"tests\": []} else:", "attr[\"attribute_value\"] return default def table(self, results): table = { \"header\": [\"Requirement\", \"Tests\"], \"rows\":", "a unique part of the name can be specified.\" )) parser.set_defaults(func=cls()) def get_attribute(self,", "req in requirements.values(): 
counts.units += 1 tests = req[\"tests\"] if not tests: counts.untested", "log, default: stdin\", default=\"-\") parser.add_argument(\"output\", metavar=\"output\", type=argtype.file(\"w\", bufsize=1, encoding=\"utf-8\"), nargs=\"?\", help='output file, default:", "partial import testflows.settings as settings import testflows._core.cli.arg.type as argtype from testflows._core import __version__", "{\"requirement\": Requirement(**req), \"tests\": []} else: spec = importlib.util.spec_from_file_location(\"requirements\", path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module)", "+= 1 tests = req[\"tests\"] if not tests: counts.untested += 1 req[\"status\"] =", "tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True)) messages.append(format_result(test[\"result\"], no_colors=True)) test[\"messages\"] = \"\".join(messages) return test def add_tests(self,", "return (_specs, _requirements) def add_test_messages(self, test, idx, tests, tests_by_parent, tests_by_id): started = test[\"test\"][\"message_time\"]", "counts = data[\"counts\"] def template(value, title, color): return ( f'<div class=\"c100 p{value} {color}", "f'\\n<div class=\"test-procedure hidden\">\\n```testflows\\n{test[\"messages\"]}\\n```\\n</div>' if not r[\"tests\"]: s += f'\\n<div class=\"no-tests\">\\n<span class=\"result-inline\">\\u270E</span>\\nNo tests\\n</div>' s", "= { \"OK\": \"Satisfied\", \"Fail\": \"Unsatisfied\", \"Error\": \"Untested\" } s = \"\\n\\n## Statistics\\n\"", "tests = req[\"tests\"] if not tests: counts.untested += 1 req[\"status\"] = \"untested\" else:", "s = \"\\n\\n## Coverage\\n\" for r in reqs.values(): s += f'\\n<section class=\"requirement\"><span class=\"requirement-inline\"><i", "results[\"tests_by_parent\"], results[\"tests_by_id\"])) return requirements def counts(self, requirements): counts = Counts(\"requirements\", *([0] * 4))", "d[\"requirements\"] = self.add_tests(requirements, results) d[\"metadata\"] 
= self.metadata(results) d[\"counts\"] = self.counts(d[\"requirements\"]) d[\"company\"] = self.company(args)", "test in r[\"tests\"]: result = test[\"result\"] cls = result[\"result_type\"].lower() s += f'\\n<div class=\"test\"><span", "from testflows._core.objects import Requirement logo = '<img class=\"logo\" src=\"data:image/png;base64,%(data)s\" alt=\"logo\"/>' testflows = '<span", "json import time import base64 import threading import importlib.util from datetime import datetime", "test[\"result\"] if result[\"result_type\"] != \"OK\": satisfied = False if satisfied: counts.satisfied += 1", "import HelpFormatter from testflows._core.cli.arg.handlers.handler import Handler as HandlerBase from testflows._core.cli.arg.handlers.report.copyright import copyright from", "not tests: return default test = tests[0][\"test\"] for attr in test[\"attributes\"]: if attr[\"attribute_name\"]", "class=\"logo\" src=\"data:image/png;base64,%(data)s\" alt=\"logo\"/>' testflows = '<span class=\"testflows-logo\"></span> [<span class=\"logo-test\">Test</span><span class=\"logo-flows\">Flows</span>]' testflows_em = testflows.replace(\"[\",", "requirements coverage report.\", formatter_class=HelpFormatter) parser.add_argument(\"requirements\", metavar=\"requirements\", type=partial(argtype.path, special=[\"-\"]), help=\"requirements source file, default: '-'", "+= 1 req[\"status\"] = \"satisfied\" else: counts.unsatisfied += 1 req[\"status\"] = \"unsatisfied\" return", "name in spec_names: if name in spec[\"specification_name\"]: matched = True break if not", "that include all specification names title = args.title if title is None and", "reqs.values(): s += f'\\n<section class=\"requirement\"><span class=\"requirement-inline\"><i class=\"utf-icon {self.icon_colors[r[\"status\"]]}\">{self.utf_icons[r[\"status\"]]}</i>{r[\"requirement\"].name}</span></section>' description = r[\"requirement\"].description.replace(\"\\\\n\",\"\\n\") if description:", 
"t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True)) messages.append(format_result(test[\"result\"], no_colors=True)) test[\"messages\"] = \"\".join(messages)", "status - Confidential</p>\\n' def format_copyright(self, data): if not data[\"company\"].get(\"name\"): return \"\" return (f'\\n<p", "name in spec[\"specification_name\"]: matched = True break if not matched: continue _specs.append(spec) for", "/ float(counts.units) * 100:.0f}\", \"Satisfied\", \"green\") if counts.unsatisfied > 0: s += template(f\"{counts.unsatisfied", "return template.strip() % { \"logo\": self.format_logo(data), \"confidential\": self.format_confidential(data), \"copyright\": self.format_copyright(data), \"body\": body, \"script\":", "2.0 (the \"License\"); # you may not use this file except in compliance", "f\"\"\" <section class=\"clearfix\">%(logo)s%(confidential)s%(copyright)s</section> --- # Requirements Coverage Report%(title)s %(body)s --- Generated by {testflows}", "}); }); // Toggle test procedure on click document.querySelectorAll('.test').forEach( function(item){ item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show');", "function(){ item.nextElementSibling.classList.toggle('show'); item.classList.toggle('active'); }); }); } \"\"\" class Formatter: utf_icons = { \"satisfied\":", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", "body += self.format_metadata(data) body += self.format_summary(data) body += self.format_statistics(data) body += self.format_table(data) return", "= d[\"counts\"] return d def generate(self, formatter, results, args): output = args.output output.write(", "[]} else: spec = importlib.util.spec_from_file_location(\"requirements\", path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) for name, value", "path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) for name, value in vars(module).items(): if not isinstance(value,", "# # Unless required by applicable law or agreed to in writing, software", "parser.add_argument(\"output\", metavar=\"output\", type=argtype.file(\"w\", bufsize=1, encoding=\"utf-8\"), nargs=\"?\", help='output file, default: stdout', default=\"-\") parser.add_argument(\"--show\", metavar=\"status\",", "results, args): output = args.output output.write( formatter.format(self.data(args.requirements, results, args)) ) output.write(\"\\n\") def handle(self,", "args.copyright: d[\"name\"] = args.copyright if args.confidential: d[\"confidential\"] = True if args.logo: d[\"logo\"] =", "express or implied. 
# See the License for the specific language governing permissions", "as argtype from testflows._core import __version__ from testflows._core.flags import Flags, SKIP from testflows._core.testtype", "+ [f'<span class=\"result result-{k.lower()}\">{v}</span>' for k, v in result_map.items()] ) + \"||\\n\" s", "def generate(self, formatter, results, args): output = args.output output.write( formatter.format(self.data(args.requirements, results, args)) )", "messages.append(format_result(t[\"result\"], no_colors=True)) else: for t in tests[idx + 1:]: flags = Flags(t[\"test\"][\"test_flags\"]) if", "item.classList.toggle('active'); }); }); } \"\"\" class Formatter: utf_icons = { \"satisfied\": \"\\u2714\", \"unsatisfied\":", "(_specs, _requirements) def add_test_messages(self, test, idx, tests, tests_by_parent, tests_by_id): started = test[\"test\"][\"message_time\"] ended", "testflows._core import __version__ from testflows._core.flags import Flags, SKIP from testflows._core.testtype import TestType from", "either express or implied. 
# See the License for the specific language governing", "report\", epilog=epilog(), description=\"Generate requirements coverage report.\", formatter_class=HelpFormatter) parser.add_argument(\"requirements\", metavar=\"requirements\", type=partial(argtype.path, special=[\"-\"]), help=\"requirements source", "source, results) # if custom title was not specified generate a title #", "data): metadata = data[\"metadata\"] s = ( \"\\n\\n\" f\"||**Date**||{localfromtimestamp(metadata['date']):%b %d, %Y %-H:%M}||\\n\" f'||**Framework**||'", "self.format_logo(data), \"confidential\": self.format_confidential(data), \"copyright\": self.format_copyright(data), \"body\": body, \"script\": script, \"title\": self.format_title(data) } class", "= test[\"result\"] cls = result[\"result_type\"].lower() s += f'\\n<div class=\"test\"><span class=\"result result-inline result-{cls}\">{result[\"result_type\"]}</span><span class=\"time", "\"tests\": []} else: spec = importlib.util.spec_from_file_location(\"requirements\", path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) for name,", "help=\"mark as confidential\", action=\"store_true\") parser.add_argument(\"--logo\", metavar=\"path\", type=argtype.file(\"rb\"), help='use logo image (.png)') parser.add_argument(\"--title\", metavar=\"name\",", "spec_names: matched = False for name in spec_names: if name in spec[\"specification_name\"]: matched", "s += f'\\n<div markdown=\"1\" class=\"requirement-description hidden\">\\n{description}\\n</div>' for test in r[\"tests\"]: result = test[\"result\"]", "\", default: include all specifications. 
Only a unique part of the name can", "import base64 import threading import importlib.util from datetime import datetime from functools import", "requirements[requirement[\"requirement_name\"]][\"tests\"].append(self.add_test_messages(test, i, tests, results[\"tests_by_parent\"], results[\"tests_by_id\"])) return requirements def counts(self, requirements): counts = Counts(\"requirements\",", "k, v in result_map.items()] ) + \"||\\n\" s += \"||\" + \"||\".join([f\"<center>{i}</center>\" for", "t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)) messages.append(format_result(t[\"result\"], no_colors=True)) else: for t in tests[idx", "data[\"company\"].get(\"logo\"): return \"\" data = base64.b64encode(data[\"company\"][\"logo\"]).decode(\"utf-8\") return '\\n<p>' + logo % {\"data\": data}", "the License. # You may obtain a copy of the License at #", "specs: title = \"<br>\".join([spec[\"specification_name\"] for spec in specs]) d[\"title\"] = title d[\"requirements\"] =", "item.nextElementSibling.classList.toggle('show'); item.classList.toggle('active'); }); }); } \"\"\" class Formatter: utf_icons = { \"satisfied\": \"\\u2714\",", "if custom title was not specified generate a title # that include all", "= [format_test(test[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True)] if getattr(TestType, test[\"test\"][\"test_type\"]) > TestType.Test: for t", "self.format_confidential(data), \"copyright\": self.format_copyright(data), \"body\": body, \"script\": script, \"title\": self.format_title(data) } class Counts(object): def", "coverage report.\", formatter_class=HelpFormatter) parser.add_argument(\"requirements\", metavar=\"requirements\", type=partial(argtype.path, special=[\"-\"]), help=\"requirements source file, default: '-' (from", "item.children[0].classList.toggle('active'); }); }); // Toggle test procedure on click 
document.querySelectorAll('.test').forEach( function(item){ item.addEventListener('click', function(){", "in tests: result = test[\"result\"] if result[\"result_type\"] != \"OK\": satisfied = False if", "return default test = tests[0][\"test\"] for attr in test[\"attributes\"]: if attr[\"attribute_name\"] == name:", "in test[\"test\"][\"requirements\"]: if requirement[\"requirement_name\"] in requirements: requirements[requirement[\"requirement_name\"]][\"tests\"].append(self.add_test_messages(test, i, tests, results[\"tests_by_parent\"], results[\"tests_by_id\"])) return requirements", "class=\"result result-{k.lower()}\">{v}</span>' for k, v in result_map.items()] ) + \"||\\n\" s += \"||\"", "function(){ // Toggle requirement description on click document.querySelectorAll('.requirement').forEach( function(item){ item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show'); item.children[0].classList.toggle('active');", "requirements.values(): counts.units += 1 tests = req[\"tests\"] if not tests: counts.untested += 1", "parser.add_argument(\"--confidential\", help=\"mark as confidential\", action=\"store_true\") parser.add_argument(\"--logo\", metavar=\"path\", type=argtype.file(\"rb\"), help='use logo image (.png)') parser.add_argument(\"--title\",", "> ended: break if getattr(TestType, t[\"test\"][\"test_type\"]) >= TestType.Test \\ and t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\",", "if getattr(TestType, t[\"test\"][\"test_type\"]) >= TestType.Test \\ and t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\", tests_by_parent, tests_by_id, no_colors=True))", "data} + \"</p>\\n\" def format_confidential(self, data): if not data[\"company\"].get(\"confidential\"): return \"\" return f'\\n<p", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "\"Error\", \"Null\"] XoutResults = 
[\"XOK\", \"XFail\", \"XError\", \"XNull\"] template = f\"\"\" <section class=\"clearfix\">%(logo)s%(confidential)s%(copyright)s</section>", "counts.unsatisfied += 1 req[\"status\"] = \"unsatisfied\" return counts def company(self, args): d =", "= {} if args.copyright: d[\"name\"] = args.copyright if args.confidential: d[\"confidential\"] = True if", "+= self.format_statistics(data) body += self.format_table(data) return template.strip() % { \"logo\": self.format_logo(data), \"confidential\": self.format_confidential(data),", "}); } \"\"\" class Formatter: utf_icons = { \"satisfied\": \"\\u2714\", \"unsatisfied\": \"\\u2718\", \"untested\":", "import testflows.settings as settings import testflows._core.cli.arg.type as argtype from testflows._core import __version__ from", "in enumerate(tests): flags = Flags(test[\"test\"][\"test_flags\"]) if flags & SKIP and settings.show_skipped is False:", "str(counts.unsatisfied), str(counts.untested)]]) + \"||\\n\" return s + \"\\n\" def format_table(self, data): reqs =", "counts.unsatisfied > 0: s += template(f\"{counts.unsatisfied / float(counts.units) * 100:.0f}\", \"Unsatisfied\", \"red\") if", "\"script\": script, \"title\": self.format_title(data) } class Counts(object): def __init__(self, name, units, satisfied, unsatisfied,", "'-' (from input log)\", nargs=\"?\", default=\"-\") parser.add_argument(\"input\", metavar=\"input\", type=argtype.logfile(\"r\", bufsize=1, encoding=\"utf-8\"), nargs=\"?\", help=\"input", "name can be specified.\" )) parser.set_defaults(func=cls()) def get_attribute(self, result, name, default=None): tests =", "import copyright from testflows._core.transform.log.pipeline import ResultsLogPipeline from testflows._core.transform.log.short import format_test, format_result from testflows._core.utils.timefuncs", "\"<br>\" + make_title(data[\"title\"]) return \"\" def format(self, data): body = \"\" body +=", "= f\"\"\" <section class=\"clearfix\">%(logo)s%(confidential)s%(copyright)s</section> --- 
# Requirements Coverage Report%(title)s %(body)s --- Generated by", "/ float(counts.units) * 100:.0f}\", \"Untested\", \"orange\") s += '</div>\\n' return s def format_statistics(self,", "logo image (.png)') parser.add_argument(\"--title\", metavar=\"name\", help=\"custom title\", type=str) parser.add_argument(\"--only\", metavar=\"name\", type=str, default=[], nargs=\"+\",", "help=\"attribute that is used as a link to the input log, default: job.url\",", "help='output file, default: stdout', default=\"-\") parser.add_argument(\"--show\", metavar=\"status\", type=str, nargs=\"+\", help=\"verification status. Choices: 'satisfied',", "include all specifications. Only a unique part of the name can be specified.\"", "tests = list(results[\"tests\"].values()) for i, test in enumerate(tests): flags = Flags(test[\"test\"][\"test_flags\"]) if flags", "%d, %Y %-H:%M}||\\n\" f'||**Framework**||' f'{testflows} {metadata[\"version\"]}||\\n' ) return s + \"\\n\" def format_summary(self,", "Handler(HandlerBase): @classmethod def add_command(cls, commands): parser = commands.add_parser(\"coverage\", help=\"requirements coverage report\", epilog=epilog(), description=\"Generate", "return test def add_tests(self, requirements, results): tests = list(results[\"tests\"].values()) for i, test in", "type=str) parser.add_argument(\"--only\", metavar=\"name\", type=str, default=[], nargs=\"+\", help=(\"name of one or more specifications for", "metadata(self, results): return { \"date\": time.time(), \"version\": __version__, } def requirements(self, spec_names, path,", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "threading import importlib.util from datetime import datetime from functools import partial import testflows.settings", "= units self.satisfied = satisfied self.unsatisfied = unsatisfied self.untested = untested def __bool__(self):", "for spec in specs]) d[\"title\"] = title d[\"requirements\"] = 
self.add_tests(requirements, results) d[\"metadata\"] =", "self.untested = untested def __bool__(self): return self.units > 0 class Handler(HandlerBase): @classmethod def", "\"tests\": []} return (_specs, _requirements) def add_test_messages(self, test, idx, tests, tests_by_parent, tests_by_id): started", "\"\\n\" def format_title(self, data): if data[\"title\"]: return \"<br>\" + make_title(data[\"title\"]) return \"\" def", "help=\"custom title\", type=str) parser.add_argument(\"--only\", metavar=\"name\", type=str, default=[], nargs=\"+\", help=(\"name of one or more", "False: continue if t[\"test\"][\"message_time\"] > ended: break if t[\"test\"][\"test_id\"].startswith(test[\"test\"][\"test_id\"]): messages.append(format_test(t[\"test\"], \"\", tests_by_parent, tests_by_id,", "# Requirements Coverage Report%(title)s %(body)s --- Generated by {testflows} Open-Source Test Framework [<span", "'<div class=\"fill\"></div>' '</div>' '</div>\\n') s = \"\\n## Summary\\n\" if counts.units <= 0: s", "{ \"satisfied\": \"\\u2714\", \"unsatisfied\": \"\\u2718\", \"untested\": \"\\u270E\" } icon_colors = { \"satisfied\": \"color-ok\",", "generate coverage report\" \", default: include all specifications. 
Only a unique part of", "not data[\"company\"].get(\"confidential\"): return \"\" return f'\\n<p class=\"confidential\">Document status - Confidential</p>\\n' def format_copyright(self, data):", "> TestType.Test: for t in tests[idx + 1:]: flags = Flags(t[\"test\"][\"test_flags\"]) if flags", "test[\"result\"] for requirement in test[\"test\"][\"requirements\"]: if requirement[\"requirement_name\"] in requirements: requirements[requirement[\"requirement_name\"]][\"tests\"].append(self.add_test_messages(test, i, tests, results[\"tests_by_parent\"],", "tests, tests_by_parent, tests_by_id): started = test[\"test\"][\"message_time\"] ended = test[\"result\"][\"message_time\"] messages = [format_test(test[\"test\"], \"\",", "return f'\\n<p class=\"confidential\">Document status - Confidential</p>\\n' def format_copyright(self, data): if not data[\"company\"].get(\"name\"): return", "except in compliance with the License. # You may obtain a copy of", "else: s += '<div class=\"chart\">' if counts.satisfied > 0: s += template(f\"{counts.satisfied /", "} s = \"\\n\\n## Statistics\\n\" s += \"||\" + \"||\".join( [\"<span></span>\", \"Units\"] +", "\"unsatisfied\", \"untested\"], default=[\"satisfied\", \"unsatisfied\", \"untested\"]) parser.add_argument(\"--input-link\", metavar=\"attribute\", help=\"attribute that is used as a", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "parser = commands.add_parser(\"coverage\", help=\"requirements coverage report\", epilog=epilog(), description=\"Generate requirements coverage report.\", formatter_class=HelpFormatter) parser.add_argument(\"requirements\",", "= name self.units = units self.satisfied = satisfied self.unsatisfied = unsatisfied self.untested =", "import localfromtimestamp, strftimedelta from testflows._core.utils.string import title as make_title from testflows._core.transform.log.report.totals import Counts", "+= f'\\n<div class=\"no-tests\">\\n<span class=\"result-inline\">\\u270E</span>\\nNo tests\\n</div>' s += \"\\n\" return s + \"\\n\" def", "= unsatisfied self.untested = untested def __bool__(self): return self.units > 0 class Handler(HandlerBase):", "= \"untested\" else: satisfied = True for test in tests: result = test[\"result\"]", "args)) ) output.write(\"\\n\") def handle(self, args): results = {} formatter = Formatter() ResultsLogPipeline(args.input,", "if not tests: counts.untested += 1 req[\"status\"] = \"untested\" else: satisfied = True", "== name: return attr[\"attribute_value\"] return default def table(self, results): table = { \"header\":", "\"\\u270E\" } icon_colors = { \"satisfied\": \"color-ok\", \"unsatisfied\": \"color-fail\", \"untested\": \"color-error\" } def", "= function(){ // Toggle requirement description on click document.querySelectorAll('.requirement').forEach( function(item){ item.addEventListener('click', function(){ item.nextElementSibling.classList.toggle('show');", "s += f'\\n<div class=\"test-procedure hidden\">\\n```testflows\\n{test[\"messages\"]}\\n```\\n</div>' if not r[\"tests\"]: s += f'\\n<div class=\"no-tests\">\\n<span class=\"result-inline\">\\u270E</span>\\nNo", "s += f'\\n<section class=\"requirement\"><span class=\"requirement-inline\"><i class=\"utf-icon 
{self.icon_colors[r[\"status\"]]}\">{self.utf_icons[r[\"status\"]]}</i>{r[\"requirement\"].name}</span></section>' description = r[\"requirement\"].description.replace(\"\\\\n\",\"\\n\") if description: s", "Flags(test[\"test\"][\"test_flags\"]) if flags & SKIP and settings.show_skipped is False: continue result = test[\"result\"]", "company(self, args): d = {} if args.copyright: d[\"name\"] = args.copyright if args.confidential: d[\"confidential\"]", "time import base64 import threading import importlib.util from datetime import datetime from functools", "\"XNull\"] template = f\"\"\" <section class=\"clearfix\">%(logo)s%(confidential)s%(copyright)s</section> --- # Requirements Coverage Report%(title)s %(body)s ---", "\"satisfied\" else: counts.unsatisfied += 1 req[\"status\"] = \"unsatisfied\" return counts def company(self, args):", "import format_test, format_result from testflows._core.utils.timefuncs import localfromtimestamp, strftimedelta from testflows._core.utils.string import title as", "* 100:.0f}\", \"Untested\", \"orange\") s += '</div>\\n' return s def format_statistics(self, data): counts", "Confidential</p>\\n' def format_copyright(self, data): if not data[\"company\"].get(\"name\"): return \"\" return (f'\\n<p class=\"copyright\">\\n' f'{copyright(data[\"company\"][\"name\"])}\\n'", "+= self.format_metadata(data) body += self.format_summary(data) body += self.format_statistics(data) body += self.format_table(data) return template.strip()", "class Counts(object): def __init__(self, name, units, satisfied, unsatisfied, untested): self.name = name self.units", "import json import time import base64 import threading import importlib.util from datetime import", "job.url\", type=str, default=\"job.url\") parser.add_argument(\"--format\", metavar=\"type\", type=str, help=\"output format, default: md (Markdown)\", choices=[\"md\"], default=\"md\")", "args): d = dict() specs, requirements = self.requirements(args.only, source, results) # if custom", 
"class=\"bar\"></div>' '<div class=\"fill\"></div>' '</div>' '</div>\\n') s = \"\\n## Summary\\n\" if counts.units <= 0:", "self.format_summary(data) body += self.format_statistics(data) body += self.format_table(data) return template.strip() % { \"logo\": self.format_logo(data),", "s += \"||\" + \"||\".join([f\"<center>{i}</center>\" for i in [\"**Requirements**\", str(counts.units), str(counts.satisfied), str(counts.unsatisfied), str(counts.untested)]])", "specifications. Only a unique part of the name can be specified.\" )) parser.set_defaults(func=cls())" ]
[ "group in self.param_groups: for p in group[\"params\"]: if p.grad is None: continue B,", "# print(p, lorentz_scalar_product(p, p)) update = exp_map(p, -group[\"learning_rate\"] * proj) is_nan_inf = torch.isnan(update)", "but l(x,y) # print(dists) predicted_parent = np.argmax(dists) actual_parent = np.argmax(pair_mat[:, i]) # print(predicted_parent,", "This is used to calculate the losses Return: - size (B,) - Ranking", "continue B, D = p.size() gl = torch.eye(D, device=p.device, dtype=p.dtype) gl[0, 0] =", "increasing, so no need of that here # and no -dist also, as", "BD tn = tangent_norm(v).unsqueeze(dim=1) tn_expand = tn.repeat(1, x.size()[-1]) result = torch.cosh(tn) * x", "= 5 # padding idx push it to corner set_dim0(self.table.weight) def forward(self, I,", "from tensorboardX import SummaryWriter from torch.utils.data import Dataset, DataLoader import datasets import pickle", "grad_norm = torch.norm(p.grad.data) grad_norm = torch.where(grad_norm > 1, grad_norm, torch.tensor(1.0).to(p.device)) # only normalize", "lorentz_scalar_product(p, h) / lorentz_scalar_product(p, p) ).unsqueeze(1) * p ) # print(p, lorentz_scalar_product(p, p))", "torch.utils.data import Dataset, DataLoader import datasets import pickle import matplotlib matplotlib.use(\"Agg\") # this", "learning_rate is not None else 0.01 defaults = {\"learning_rate\": learning_rate} super().__init__(params, defaults=defaults) def", "nn from torch import optim from tqdm import trange, tqdm from collections import", "self.arange if has_parent: # if no child go for parent valid_idxs = arange[self.pairwise_matrix[arange,", "indices[: self.sample_size] #print(indices) #raise NotImplementedError() Ks = np.concatenate([[j], indices, np.zeros(self.sample_size)])[ : self.sample_size ]", "is not None else 0.01 defaults = {\"learning_rate\": learning_rate} super().__init__(params, defaults=defaults) def step(self):", "= {\"learning_rate\": learning_rate} super().__init__(params, defaults=defaults) def 
step(self): for group in self.param_groups: for p", "> 1, grad_norm, torch.tensor(1.0).to(p.device)) # only normalize if global grad_norm is more than", "* p ) # print(p, lorentz_scalar_product(p, p)) update = exp_map(p, -group[\"learning_rate\"] * proj)", "- The `j` document must be the first of the N indices. This", "dists.reshape(B, N) loss = -(dists[:, 0] - torch.log(torch.exp(dists).sum(dim=1) + 1e-6)) return loss def", "is nothing but l(x,y) # print(dists) predicted_parent = np.argmax(dists) actual_parent = np.argmax(pair_mat[:, i])", "indices = arange indices = indices[indices != i] if has_child: indices = indices[(self.pairwise_matrix[i,indices]", "= arange indices = indices[indices != i] if has_child: indices = indices[(self.pairwise_matrix[i,indices] <", "params, learning_rate=None): learning_rate = learning_rate if learning_rate is not None else 0.01 defaults", "\"\"\" This will embed `n_items` in a `dim` dimensional lorentz space. \"\"\" def", "Ks): \"\"\" Using the pairwise similarity matrix, generate the following inputs and provide", "= (self.cnter + 1) % self.batch_size I = torch.Tensor([i + 1]).squeeze().long() has_child =", "dists = -arcosh(dists) # print(dists) # ---------- turn back to per-sample shape dists", "= -1 grad_norm = torch.norm(p.grad.data) grad_norm = torch.where(grad_norm > 1, grad_norm, torch.tensor(1.0).to(p.device)) #", "= Ks.size()[1] ui = torch.stack([self.table(I)] * n_ks, dim=1) uks = self.table(Ks) # ----------", "and provide to this function. Inputs: - I : - long tensor -", "update = torch.where(is_nan_inf, p, update) update[0, :] = p[0, :] # no ❤️", "-> B m = x * y result = m[:, 1:].sum(dim=1) - m[:,", "Inputs: - I : - long tensor - size (B,) - This denotes", "import numpy as np from torch import nn from torch import optim from", "has_parent = (self.pairwise_matrix[:, i] > 0).sum() if self.cnter == 0: arange = np.random.permutation(self.arange)", "Ranking loss calculated using document to the given `i` document. 
\"\"\" n_ks =", "print(I, Ks) return I, torch.Tensor(Ks).long() def recon(table, pair_mat): \"Reconstruction accuracy\" count = 0", "np.argmax(pair_mat[:, i]) # print(predicted_parent, actual_parent, i, end=\"\\n\\n\") count += actual_parent == predicted_parent count", "dim, init_range=0.001): super().__init__() self.n_items = n_items self.dim = dim self.table = nn.Embedding(n_items +", "x.size()[-1]) result = torch.cosh(tn) * x + torch.sinh(tn) * (v / tn) result", "in a `dim` dimensional lorentz space. \"\"\" def __init__(self, n_items, dim, init_range=0.001): super().__init__()", "global grad_norm is more than 1 h = (p.grad.data / grad_norm) @ gl", "and no child\") indices = arange indices = indices[indices != i] if has_child:", "(self.pairwise_matrix[i] > 0).sum() has_parent = (self.pairwise_matrix[:, i] > 0).sum() if self.cnter == 0:", "imports import matplotlib.pyplot as plt plt.style.use(\"ggplot\") def arcosh(x): return torch.log(x + torch.sqrt(x **", "* N, D) uks = uks.reshape(B * N, D) dists = -lorentz_scalar_product(ui, uks)", "NOTE: the paper does not mention the square part of the equation but", "grad_norm is more than 1 h = (p.grad.data / grad_norm) @ gl proj", "(p.grad.data / grad_norm) @ gl proj = ( h - ( lorentz_scalar_product(p, h)", "if self.cnter == 0: arange = np.random.permutation(self.arange) else: arange = self.arange if has_parent:", "the N indices. 
This is used to calculate the losses Return: - size", "in group[\"params\"]: if p.grad is None: continue B, D = p.size() gl =", "self.table.weight.data.cpu().numpy() return table[:, 1:] / ( table[:, :1] + 1 ) # diffeomorphism", "j = valid_idxs[0] min = self.pairwise_matrix[i,j] else: raise Exception(f\"Node {i} has no parent", "- 1)) def lorentz_scalar_product(x, y): # BD, BD -> B m = x", "DataLoader import datasets import pickle import matplotlib matplotlib.use(\"Agg\") # this needs to come", "( lorentz_scalar_product(p, h) / lorentz_scalar_product(p, p) ).unsqueeze(1) * p ) # print(p, lorentz_scalar_product(p,", "function. Inputs: - I : - long tensor - size (B,) - This", "i, -acosh(-l(x,y)) is nothing but l(x,y) # print(dists) predicted_parent = np.argmax(dists) actual_parent =", "result, x) # only update if tangent norm is > 0 return result", "torch.ones_like(dists) + 1e-6, dists) # sometimes 2 embedding can come very close in", "sample_size self.arange = np.arange(0, self.n_items) self.cnter = 0 self.batch_size = batch_size def __len__(self):", "= n_items self.dim = dim self.table = nn.Embedding(n_items + 1, dim, padding_idx=0) nn.init.uniform_(self.table.weight,", "import datetime from tensorboardX import SummaryWriter from torch.utils.data import Dataset, DataLoader import datasets", "if has_parent: # if no child go for parent valid_idxs = arange[self.pairwise_matrix[arange, i].nonzero()[0]]", "idx push it to corner set_dim0(self.table.weight) def forward(self, I, Ks): \"\"\" Using the", "embedding can come very close in R^D. # when calculating the lorenrz inner", "result def tangent_norm(x): # BD -> B return torch.sqrt(lorentz_scalar_product(x, x)) def exp_map(x, v):", "- Ranking loss calculated using document to the given `i` document. 
\"\"\" n_ks", "go for parent valid_idxs = arange[self.pairwise_matrix[arange, i].nonzero()[0]] j = valid_idxs[0] min = self.pairwise_matrix[j,i]", "i] > 0).sum() if self.cnter == 0: arange = np.random.permutation(self.arange) else: arange =", "= torch.eye(D, device=p.device, dtype=p.dtype) gl[0, 0] = -1 grad_norm = torch.norm(p.grad.data) grad_norm =", "ui.size() ui = ui.reshape(B * N, D) uks = uks.reshape(B * N, D)", "acosh in m i, -acosh(-l(x,y)) is nothing but l(x,y) # print(dists) predicted_parent =", "None: continue B, D = p.size() gl = torch.eye(D, device=p.device, dtype=p.dtype) gl[0, 0]", "maxnorm=1e2) # otherwise leaves will explode # NOTE: the paper does not mention", "also, as acosh in m i, -acosh(-l(x,y)) is nothing but l(x,y) # print(dists)", "more than 1 h = (p.grad.data / grad_norm) @ gl proj = (", "1 ) # diffeomorphism transform to poincare ball def get_lorentz_table(self): return self.table.weight.data.cpu().numpy() def", "# BD, BD -> B m = x * y result = m[:,", "p.grad is None: continue B, D = p.size() gl = torch.eye(D, device=p.device, dtype=p.dtype)", "torch.log(x + torch.sqrt(x ** 2 - 1)) def lorentz_scalar_product(x, y): # BD, BD", "1 mask = mask * -10000.0 dists = lorentz_scalar_product(x, table) + mask dists", "of the equation but if # you try to derive it you get", "mask * -10000.0 dists = lorentz_scalar_product(x, table) + mask dists = ( dists.cpu().numpy()", "if p.grad is None: continue B, D = p.size() gl = torch.eye(D, device=p.device,", "- size (B,) - Ranking loss calculated using document to the given `i`", "= ( dists.cpu().numpy() ) # arccosh is monotonically increasing, so no need of", "p, update) update[0, :] = p[0, :] # no ❤️ for embedding update", "self.cnter == 0: arange = np.random.permutation(self.arange) else: arange = self.arange if has_parent: #", "from datetime import datetime from tensorboardX import SummaryWriter from torch.utils.data import Dataset, DataLoader", "B m = x * y result = m[:, 1:].sum(dim=1) - m[:, 0]", 
"torch.where(tn_expand > 0, result, x) # only update if tangent norm is >", "given `i` document. \"\"\" n_ks = Ks.size()[1] ui = torch.stack([self.table(I)] * n_ks, dim=1)", "# you try to derive it you get a square term in the", "= -arcosh(dists) # print(dists) # ---------- turn back to per-sample shape dists =", "torch.tensor(table[1:]) n = pair_mat.shape[0] for i in range(1, n): # 0 padding, 1", "batch_size, sample_size=10): self.pairwise_matrix = pairwise_matrix self.n_items = pairwise_matrix.shape[0] self.sample_size = sample_size self.arange =", "update = exp_map(p, -group[\"learning_rate\"] * proj) is_nan_inf = torch.isnan(update) | torch.isinf(update) update =", "indices = indices[: self.sample_size] #print(indices) #raise NotImplementedError() Ks = np.concatenate([[j], indices, np.zeros(self.sample_size)])[ :", "super().__init__(params, defaults=defaults) def step(self): for group in self.param_groups: for p in group[\"params\"]: if", "set_dim0(x): x = torch.renorm(x, p=2, dim=0, maxnorm=1e2) # otherwise leaves will explode #", "= tn.repeat(1, x.size()[-1]) result = torch.cosh(tn) * x + torch.sinh(tn) * (v /", "def step(self): for group in self.param_groups: for p in group[\"params\"]: if p.grad is", "padding, 1 root, we leave those two x = table[i].repeat(len(table)).reshape([len(table), len(table[i])]) # N,", "which come from the nearest neighbor sample. 
- The `j` document must be", "pairwise_matrix, batch_size, sample_size=10): self.pairwise_matrix = pairwise_matrix self.n_items = pairwise_matrix.shape[0] self.sample_size = sample_size self.arange", "uks.reshape(B * N, D) dists = -lorentz_scalar_product(ui, uks) dists = torch.where(dists <= 1,", "self.n_items def __getitem__(self, i): self.cnter = (self.cnter + 1) % self.batch_size I =", "return result def tangent_norm(x): # BD -> B return torch.sqrt(lorentz_scalar_product(x, x)) def exp_map(x,", "other matplotlib imports import matplotlib.pyplot as plt plt.style.use(\"ggplot\") def arcosh(x): return torch.log(x +", "torch.stack([self.table(I)] * n_ks, dim=1) uks = self.table(Ks) # ---------- reshape for calculation B,", "it you get a square term in the equation dim0 = torch.sqrt(1 +", "min).nonzero()[1]] indices = indices[: self.sample_size] #print(indices) #raise NotImplementedError() Ks = np.concatenate([[j], indices, np.zeros(self.sample_size)])[", "def __init__(self, n_items, dim, init_range=0.001): super().__init__() self.n_items = n_items self.dim = dim self.table", "trange, tqdm from collections import Counter from datetime import datetime from tensorboardX import", "return x # ========================= models class RSGD(optim.Optimizer): def __init__(self, params, learning_rate=None): learning_rate =", "import datasets import pickle import matplotlib matplotlib.use(\"Agg\") # this needs to come before", "x * y result = m[:, 1:].sum(dim=1) - m[:, 0] return result def", "equation dim0 = torch.sqrt(1 + (x[:, 1:] ** 2).sum(dim=1)) x[:, 0] = dim0", "min = self.pairwise_matrix[i,j] else: raise Exception(f\"Node {i} has no parent and no child\")", "= lorentz_scalar_product(x, table) + mask dists = ( dists.cpu().numpy() ) # arccosh is", "l(x,y) # print(dists) predicted_parent = np.argmax(dists) actual_parent = np.argmax(pair_mat[:, i]) # print(predicted_parent, actual_parent,", "if tangent norm is > 0 return result def set_dim0(x): x = torch.renorm(x,", "- m[:, 
0] return result def tangent_norm(x): # BD -> B return torch.sqrt(lorentz_scalar_product(x,", "a `dim` dimensional lorentz space. \"\"\" def __init__(self, n_items, dim, init_range=0.001): super().__init__() self.n_items", "6 with torch.no_grad(): self.table.weight[0] = 5 # padding idx push it to corner", "uks = uks.reshape(B * N, D) dists = -lorentz_scalar_product(ui, uks) dists = torch.where(dists", "calculate the losses Return: - size (B,) - Ranking loss calculated using document", "the `i` used in all equations. - Ks : - long tensor -", "for p in group[\"params\"]: if p.grad is None: continue B, D = p.size()", "`N` documents which come from the nearest neighbor sample. - The `j` document", "self.pairwise_matrix[j,i] elif has_child: valid_idxs = arange[self.pairwise_matrix[i, arange].nonzero()[1]] j = valid_idxs[0] min = self.pairwise_matrix[i,j]", "calculated using document to the given `i` document. \"\"\" n_ks = Ks.size()[1] ui", "table = self.table.weight.data.cpu().numpy() return table[:, 1:] / ( table[:, :1] + 1 )", "become -0.99(no idea!), then arcosh will become nan dists = -arcosh(dists) # print(dists)", "of that here # and no -dist also, as acosh in m i,", "if global grad_norm is more than 1 h = (p.grad.data / grad_norm) @", "learning_rate} super().__init__(params, defaults=defaults) def step(self): for group in self.param_groups: for p in group[\"params\"]:", "---------- turn back to per-sample shape dists = dists.reshape(B, N) loss = -(dists[:,", "to corner set_dim0(self.table.weight) def forward(self, I, Ks): \"\"\" Using the pairwise similarity matrix,", "Return: - size (B,) - Ranking loss calculated using document to the given", "self.n_items = n_items self.dim = dim self.table = nn.Embedding(n_items + 1, dim, padding_idx=0)", "gl proj = ( h - ( lorentz_scalar_product(p, h) / lorentz_scalar_product(p, p) ).unsqueeze(1)", "1e-6, dists) # sometimes 2 embedding can come very close in R^D. 
#", "count = 0 table = torch.tensor(table[1:]) n = pair_mat.shape[0] for i in range(1,", "loss = -(dists[:, 0] - torch.log(torch.exp(dists).sum(dim=1) + 1e-6)) return loss def lorentz_to_poincare(self): table", "= self.table.weight.data check = lorentz_scalar_product(x, x) + 1.0 return check.cpu().numpy().sum() class Graph(Dataset): def", "table[:, 1:] / ( table[:, :1] + 1 ) # diffeomorphism transform to", "========================= models class RSGD(optim.Optimizer): def __init__(self, params, learning_rate=None): learning_rate = learning_rate if learning_rate", "in R^D. # when calculating the lorenrz inner product, # -1 can become", "lorentz_scalar_product(x, table) + mask dists = ( dists.cpu().numpy() ) # arccosh is monotonically", "def lorentz_to_poincare(self): table = self.table.weight.data.cpu().numpy() return table[:, 1:] / ( table[:, :1] +", ": - long tensor - size (B, N) - This denotes at max", "return I, torch.Tensor(Ks).long() def recon(table, pair_mat): \"Reconstruction accuracy\" count = 0 table =", "y result = m[:, 1:].sum(dim=1) - m[:, 0] return result def tangent_norm(x): #", "if # you try to derive it you get a square term in", "no need of that here # and no -dist also, as acosh in", "table) + mask dists = ( dists.cpu().numpy() ) # arccosh is monotonically increasing,", "= torch.isnan(update) | torch.isinf(update) update = torch.where(is_nan_inf, p, update) update[0, :] = p[0,", "* proj) is_nan_inf = torch.isnan(update) | torch.isinf(update) update = torch.where(is_nan_inf, p, update) update[0,", "N indices. 
This is used to calculate the losses Return: - size (B,)", "= self.arange if has_parent: # if no child go for parent valid_idxs =", "{i} has no parent and no child\") indices = arange indices = indices[indices", "set_dim0(self.table.weight) def forward(self, I, Ks): \"\"\" Using the pairwise similarity matrix, generate the", "dists = lorentz_scalar_product(x, table) + mask dists = ( dists.cpu().numpy() ) # arccosh", "= sample_size self.arange = np.arange(0, self.n_items) self.cnter = 0 self.batch_size = batch_size def", "for i in range(1, n): # 0 padding, 1 root, we leave those", "x = self.table.weight.data check = lorentz_scalar_product(x, x) + 1.0 return check.cpu().numpy().sum() class Graph(Dataset):", "torch.where(grad_norm > 1, grad_norm, torch.tensor(1.0).to(p.device)) # only normalize if global grad_norm is more", "Exception(f\"Node {i} has no parent and no child\") indices = arange indices =", "* x + torch.sinh(tn) * (v / tn) result = torch.where(tn_expand > 0,", "has_child: valid_idxs = arange[self.pairwise_matrix[i, arange].nonzero()[1]] j = valid_idxs[0] min = self.pairwise_matrix[i,j] else: raise", "accuracy\" count = 0 table = torch.tensor(table[1:]) n = pair_mat.shape[0] for i in", "import random import numpy as np from torch import nn from torch import", "i in range(1, n): # 0 padding, 1 root, we leave those two", "will explode # NOTE: the paper does not mention the square part of", "back to per-sample shape dists = dists.reshape(B, N) loss = -(dists[:, 0] -", "i): self.cnter = (self.cnter + 1) % self.batch_size I = torch.Tensor([i + 1]).squeeze().long()", "datetime import datetime from tensorboardX import SummaryWriter from torch.utils.data import Dataset, DataLoader import", "is used to calculate the losses Return: - size (B,) - Ranking loss", "p in group[\"params\"]: if p.grad is None: continue B, D = p.size() gl", "min).nonzero()[0]] else: indices = indices[(self.pairwise_matrix[indices, i] < min).nonzero()[1]] indices = indices[: 
self.sample_size] #print(indices)", "mask dists = ( dists.cpu().numpy() ) # arccosh is monotonically increasing, so no", "has_child: indices = indices[(self.pairwise_matrix[i,indices] < min).nonzero()[0]] else: indices = indices[(self.pairwise_matrix[indices, i] < min).nonzero()[1]]", "to come before other matplotlib imports import matplotlib.pyplot as plt plt.style.use(\"ggplot\") def arcosh(x):", "equations. - Ks : - long tensor - size (B, N) - This", "models class RSGD(optim.Optimizer): def __init__(self, params, learning_rate=None): learning_rate = learning_rate if learning_rate is", "self.table.weight[0] = 5 # padding idx push it to corner set_dim0(self.table.weight) def forward(self,", "elif has_child: valid_idxs = arange[self.pairwise_matrix[i, arange].nonzero()[1]] j = valid_idxs[0] min = self.pairwise_matrix[i,j] else:", ") # arccosh is monotonically increasing, so no need of that here #", "= ui.size() ui = ui.reshape(B * N, D) uks = uks.reshape(B * N,", "print(predicted_parent, actual_parent, i, end=\"\\n\\n\") count += actual_parent == predicted_parent count = count /", "child go for parent valid_idxs = arange[self.pairwise_matrix[arange, i].nonzero()[0]] j = valid_idxs[0] min =", "else: raise Exception(f\"Node {i} has no parent and no child\") indices = arange", "def __init__(self, params, learning_rate=None): learning_rate = learning_rate if learning_rate is not None else", "`i` document. \"\"\" n_ks = Ks.size()[1] ui = torch.stack([self.table(I)] * n_ks, dim=1) uks", "dim, padding_idx=0) nn.init.uniform_(self.table.weight, -init_range, init_range) # equation 6 with torch.no_grad(): self.table.weight[0] = 5", "import sys import torch import random import numpy as np from torch import", "similarity matrix, generate the following inputs and provide to this function. 
Inputs: -", "def exp_map(x, v): # BD, BD -> BD tn = tangent_norm(v).unsqueeze(dim=1) tn_expand =", "p) ).unsqueeze(1) * p ) # print(p, lorentz_scalar_product(p, p)) update = exp_map(p, -group[\"learning_rate\"]", "needs to come before other matplotlib imports import matplotlib.pyplot as plt plt.style.use(\"ggplot\") def", "matrix, generate the following inputs and provide to this function. Inputs: - I", "BD, BD -> B m = x * y result = m[:, 1:].sum(dim=1)", "-1 grad_norm = torch.norm(p.grad.data) grad_norm = torch.where(grad_norm > 1, grad_norm, torch.tensor(1.0).to(p.device)) # only", "using document to the given `i` document. \"\"\" n_ks = Ks.size()[1] ui =", "as acosh in m i, -acosh(-l(x,y)) is nothing but l(x,y) # print(dists) predicted_parent", "= (self.pairwise_matrix[i] > 0).sum() has_parent = (self.pairwise_matrix[:, i] > 0).sum() if self.cnter ==", "| torch.isinf(update) update = torch.where(is_nan_inf, p, update) update[0, :] = p[0, :] #", "(v / tn) result = torch.where(tn_expand > 0, result, x) # only update", "] # print(I, Ks) return I, torch.Tensor(Ks).long() def recon(table, pair_mat): \"Reconstruction accuracy\" count", "poincare ball def get_lorentz_table(self): return self.table.weight.data.cpu().numpy() def _test_table(self): x = self.table.weight.data check =", "datasets import pickle import matplotlib matplotlib.use(\"Agg\") # this needs to come before other", "document. \"\"\" n_ks = Ks.size()[1] ui = torch.stack([self.table(I)] * n_ks, dim=1) uks =", "inputs and provide to this function. Inputs: - I : - long tensor", "the first of the N indices. This is used to calculate the losses", "= dim self.table = nn.Embedding(n_items + 1, dim, padding_idx=0) nn.init.uniform_(self.table.weight, -init_range, init_range) #", "BD -> B m = x * y result = m[:, 1:].sum(dim=1) -", "must be the first of the N indices. 
This is used to calculate", "v): # BD, BD -> BD tn = tangent_norm(v).unsqueeze(dim=1) tn_expand = tn.repeat(1, x.size()[-1])", "\"\"\" n_ks = Ks.size()[1] ui = torch.stack([self.table(I)] * n_ks, dim=1) uks = self.table(Ks)", "<= 1, torch.ones_like(dists) + 1e-6, dists) # sometimes 2 embedding can come very", "len(table)) mask[i] = 1 mask = mask * -10000.0 dists = lorentz_scalar_product(x, table)", "than 1 h = (p.grad.data / grad_norm) @ gl proj = ( h", "+ torch.sqrt(x ** 2 - 1)) def lorentz_scalar_product(x, y): # BD, BD ->", "!= i] if has_child: indices = indices[(self.pairwise_matrix[i,indices] < min).nonzero()[0]] else: indices = indices[(self.pairwise_matrix[indices,", "calculating the lorenrz inner product, # -1 can become -0.99(no idea!), then arcosh", "= torch.tensor(table[1:]) n = pair_mat.shape[0] for i in range(1, n): # 0 padding,", "tangent_norm(v).unsqueeze(dim=1) tn_expand = tn.repeat(1, x.size()[-1]) result = torch.cosh(tn) * x + torch.sinh(tn) *", "I = torch.Tensor([i + 1]).squeeze().long() has_child = (self.pairwise_matrix[i] > 0).sum() has_parent = (self.pairwise_matrix[:,", "-acosh(-l(x,y)) is nothing but l(x,y) # print(dists) predicted_parent = np.argmax(dists) actual_parent = np.argmax(pair_mat[:,", "arcosh(x): return torch.log(x + torch.sqrt(x ** 2 - 1)) def lorentz_scalar_product(x, y): #", "def lorentz_scalar_product(x, y): # BD, BD -> B m = x * y", "product, # -1 can become -0.99(no idea!), then arcosh will become nan dists", "+ 1e-6)) return loss def lorentz_to_poincare(self): table = self.table.weight.data.cpu().numpy() return table[:, 1:] /", "grad_norm = torch.where(grad_norm > 1, grad_norm, torch.tensor(1.0).to(p.device)) # only normalize if global grad_norm", "actual_parent == predicted_parent count = count / (pair_mat.shape[0] - 1) * 100 return", "2 - 1)) def lorentz_scalar_product(x, y): # BD, BD -> B m =", "+ 1, dim, padding_idx=0) nn.init.uniform_(self.table.weight, -init_range, init_range) # equation 6 with torch.no_grad(): 
self.table.weight[0]", "lorentz_to_poincare(self): table = self.table.weight.data.cpu().numpy() return table[:, 1:] / ( table[:, :1] + 1", "- torch.log(torch.exp(dists).sum(dim=1) + 1e-6)) return loss def lorentz_to_poincare(self): table = self.table.weight.data.cpu().numpy() return table[:,", "# only update if tangent norm is > 0 return result def set_dim0(x):", "= 1 mask = mask * -10000.0 dists = lorentz_scalar_product(x, table) + mask", "0 return result def set_dim0(x): x = torch.renorm(x, p=2, dim=0, maxnorm=1e2) # otherwise", "torch.Tensor([i + 1]).squeeze().long() has_child = (self.pairwise_matrix[i] > 0).sum() has_parent = (self.pairwise_matrix[:, i] >", "min = self.pairwise_matrix[j,i] elif has_child: valid_idxs = arange[self.pairwise_matrix[i, arange].nonzero()[1]] j = valid_idxs[0] min", "torch.tensor([0.0] * len(table)) mask[i] = 1 mask = mask * -10000.0 dists =", "* (v / tn) result = torch.where(tn_expand > 0, result, x) # only", "x = table[i].repeat(len(table)).reshape([len(table), len(table[i])]) # N, D mask = torch.tensor([0.0] * len(table)) mask[i]", "corner set_dim0(self.table.weight) def forward(self, I, Ks): \"\"\" Using the pairwise similarity matrix, generate", "% self.batch_size I = torch.Tensor([i + 1]).squeeze().long() has_child = (self.pairwise_matrix[i] > 0).sum() has_parent", "= np.arange(0, self.n_items) self.cnter = 0 self.batch_size = batch_size def __len__(self): return self.n_items", "1:] ** 2).sum(dim=1)) x[:, 0] = dim0 return x # ========================= models class", "for parent valid_idxs = arange[self.pairwise_matrix[arange, i].nonzero()[0]] j = valid_idxs[0] min = self.pairwise_matrix[j,i] elif", "= valid_idxs[0] min = self.pairwise_matrix[j,i] elif has_child: valid_idxs = arange[self.pairwise_matrix[i, arange].nonzero()[1]] j =", "grad_norm) @ gl proj = ( h - ( lorentz_scalar_product(p, h) / lorentz_scalar_product(p,", "here # and no -dist also, as acosh in m i, -acosh(-l(x,y)) is", "0, result, x) # only update if tangent 
norm is > 0 return", "tangent norm is > 0 return result def set_dim0(x): x = torch.renorm(x, p=2,", "+ (x[:, 1:] ** 2).sum(dim=1)) x[:, 0] = dim0 return x # =========================", "the nearest neighbor sample. - The `j` document must be the first of", "lorentz_scalar_product(x, y): # BD, BD -> B m = x * y result", "reshape for calculation B, N, D = ui.size() ui = ui.reshape(B * N,", "np.zeros(self.sample_size)])[ : self.sample_size ] # print(I, Ks) return I, torch.Tensor(Ks).long() def recon(table, pair_mat):", "This denotes at max `N` documents which come from the nearest neighbor sample.", "end=\"\\n\\n\") count += actual_parent == predicted_parent count = count / (pair_mat.shape[0] - 1)", "tn_expand = tn.repeat(1, x.size()[-1]) result = torch.cosh(tn) * x + torch.sinh(tn) * (v", "dists = -lorentz_scalar_product(ui, uks) dists = torch.where(dists <= 1, torch.ones_like(dists) + 1e-6, dists)", "import nn from torch import optim from tqdm import trange, tqdm from collections", "self.table = nn.Embedding(n_items + 1, dim, padding_idx=0) nn.init.uniform_(self.table.weight, -init_range, init_range) # equation 6", "- ( lorentz_scalar_product(p, h) / lorentz_scalar_product(p, p) ).unsqueeze(1) * p ) # print(p,", "/ tn) result = torch.where(tn_expand > 0, result, x) # only update if", "from torch.utils.data import Dataset, DataLoader import datasets import pickle import matplotlib matplotlib.use(\"Agg\") #", "the given `i` document. 
\"\"\" n_ks = Ks.size()[1] ui = torch.stack([self.table(I)] * n_ks,", "# ---------- reshape for calculation B, N, D = ui.size() ui = ui.reshape(B", "n_ks, dim=1) uks = self.table(Ks) # ---------- reshape for calculation B, N, D", "__getitem__(self, i): self.cnter = (self.cnter + 1) % self.batch_size I = torch.Tensor([i +", "h - ( lorentz_scalar_product(p, h) / lorentz_scalar_product(p, p) ).unsqueeze(1) * p ) #", "-dist also, as acosh in m i, -acosh(-l(x,y)) is nothing but l(x,y) #", "self.sample_size] #print(indices) #raise NotImplementedError() Ks = np.concatenate([[j], indices, np.zeros(self.sample_size)])[ : self.sample_size ] #", "= pair_mat.shape[0] for i in range(1, n): # 0 padding, 1 root, we", ") # print(p, lorentz_scalar_product(p, p)) update = exp_map(p, -group[\"learning_rate\"] * proj) is_nan_inf =", "Ks = np.concatenate([[j], indices, np.zeros(self.sample_size)])[ : self.sample_size ] # print(I, Ks) return I,", "`j` document must be the first of the N indices. This is used", "term in the equation dim0 = torch.sqrt(1 + (x[:, 1:] ** 2).sum(dim=1)) x[:,", "a square term in the equation dim0 = torch.sqrt(1 + (x[:, 1:] **", "= indices[indices != i] if has_child: indices = indices[(self.pairwise_matrix[i,indices] < min).nonzero()[0]] else: indices", "\"\"\" def __init__(self, n_items, dim, init_range=0.001): super().__init__() self.n_items = n_items self.dim = dim", "I, torch.Tensor(Ks).long() def recon(table, pair_mat): \"Reconstruction accuracy\" count = 0 table = torch.tensor(table[1:])", ":] # no ❤️ for embedding update = set_dim0(update) p.data.copy_(update) class Lorentz(nn.Module): \"\"\"", "square part of the equation but if # you try to derive it", "= table[i].repeat(len(table)).reshape([len(table), len(table[i])]) # N, D mask = torch.tensor([0.0] * len(table)) mask[i] =", "no parent and no child\") indices = arange indices = indices[indices != i]", "learning_rate = learning_rate if learning_rate is not None else 0.01 defaults = 
{\"learning_rate\":", "is > 0 return result def set_dim0(x): x = torch.renorm(x, p=2, dim=0, maxnorm=1e2)", "from collections import Counter from datetime import datetime from tensorboardX import SummaryWriter from", "- size (B, N) - This denotes at max `N` documents which come", "1e-6)) return loss def lorentz_to_poincare(self): table = self.table.weight.data.cpu().numpy() return table[:, 1:] / (", "print(dists) predicted_parent = np.argmax(dists) actual_parent = np.argmax(pair_mat[:, i]) # print(predicted_parent, actual_parent, i, end=\"\\n\\n\")", "when calculating the lorenrz inner product, # -1 can become -0.99(no idea!), then", "matplotlib.use(\"Agg\") # this needs to come before other matplotlib imports import matplotlib.pyplot as", "tn = tangent_norm(v).unsqueeze(dim=1) tn_expand = tn.repeat(1, x.size()[-1]) result = torch.cosh(tn) * x +", "= torch.Tensor([i + 1]).squeeze().long() has_child = (self.pairwise_matrix[i] > 0).sum() has_parent = (self.pairwise_matrix[:, i]", "tangent_norm(x): # BD -> B return torch.sqrt(lorentz_scalar_product(x, x)) def exp_map(x, v): # BD,", "raise Exception(f\"Node {i} has no parent and no child\") indices = arange indices", "def __len__(self): return self.n_items def __getitem__(self, i): self.cnter = (self.cnter + 1) %", "def get_lorentz_table(self): return self.table.weight.data.cpu().numpy() def _test_table(self): x = self.table.weight.data check = lorentz_scalar_product(x, x)", "0] = dim0 return x # ========================= models class RSGD(optim.Optimizer): def __init__(self, params,", "p ) # print(p, lorentz_scalar_product(p, p)) update = exp_map(p, -group[\"learning_rate\"] * proj) is_nan_inf", "self.dim = dim self.table = nn.Embedding(n_items + 1, dim, padding_idx=0) nn.init.uniform_(self.table.weight, -init_range, init_range)", "dists) # sometimes 2 embedding can come very close in R^D. 
# when", "torch import random import numpy as np from torch import nn from torch", "B, D = p.size() gl = torch.eye(D, device=p.device, dtype=p.dtype) gl[0, 0] = -1", "h) / lorentz_scalar_product(p, p) ).unsqueeze(1) * p ) # print(p, lorentz_scalar_product(p, p)) update", "update) update[0, :] = p[0, :] # no ❤️ for embedding update =", "indices[indices != i] if has_child: indices = indices[(self.pairwise_matrix[i,indices] < min).nonzero()[0]] else: indices =", "= torch.stack([self.table(I)] * n_ks, dim=1) uks = self.table(Ks) # ---------- reshape for calculation", "+ 1.0 return check.cpu().numpy().sum() class Graph(Dataset): def __init__(self, pairwise_matrix, batch_size, sample_size=10): self.pairwise_matrix =", "Ks.size()[1] ui = torch.stack([self.table(I)] * n_ks, dim=1) uks = self.table(Ks) # ---------- reshape", "sys import torch import random import numpy as np from torch import nn", "-lorentz_scalar_product(ui, uks) dists = torch.where(dists <= 1, torch.ones_like(dists) + 1e-6, dists) # sometimes", "def arcosh(x): return torch.log(x + torch.sqrt(x ** 2 - 1)) def lorentz_scalar_product(x, y):", "from torch import optim from tqdm import trange, tqdm from collections import Counter", "np.concatenate([[j], indices, np.zeros(self.sample_size)])[ : self.sample_size ] # print(I, Ks) return I, torch.Tensor(Ks).long() def", "no child\") indices = arange indices = indices[indices != i] if has_child: indices", "BD -> B return torch.sqrt(lorentz_scalar_product(x, x)) def exp_map(x, v): # BD, BD ->", "mention the square part of the equation but if # you try to", "# BD, BD -> BD tn = tangent_norm(v).unsqueeze(dim=1) tn_expand = tn.repeat(1, x.size()[-1]) result", "tn.repeat(1, x.size()[-1]) result = torch.cosh(tn) * x + torch.sinh(tn) * (v / tn)", "this function. 
Inputs: - I : - long tensor - size (B,) -", "is more than 1 h = (p.grad.data / grad_norm) @ gl proj =", "learning_rate=None): learning_rate = learning_rate if learning_rate is not None else 0.01 defaults =", "Lorentz(nn.Module): \"\"\" This will embed `n_items` in a `dim` dimensional lorentz space. \"\"\"", "D) uks = uks.reshape(B * N, D) dists = -lorentz_scalar_product(ui, uks) dists =", "None else 0.01 defaults = {\"learning_rate\": learning_rate} super().__init__(params, defaults=defaults) def step(self): for group", "= self.table(Ks) # ---------- reshape for calculation B, N, D = ui.size() ui", "denotes the `i` used in all equations. - Ks : - long tensor", "= batch_size def __len__(self): return self.n_items def __getitem__(self, i): self.cnter = (self.cnter +", "dists = dists.reshape(B, N) loss = -(dists[:, 0] - torch.log(torch.exp(dists).sum(dim=1) + 1e-6)) return", "arcosh will become nan dists = -arcosh(dists) # print(dists) # ---------- turn back", "== predicted_parent count = count / (pair_mat.shape[0] - 1) * 100 return count", "batch_size def __len__(self): return self.n_items def __getitem__(self, i): self.cnter = (self.cnter + 1)", "* n_ks, dim=1) uks = self.table(Ks) # ---------- reshape for calculation B, N,", "#print(indices) #raise NotImplementedError() Ks = np.concatenate([[j], indices, np.zeros(self.sample_size)])[ : self.sample_size ] # print(I,", "Counter from datetime import datetime from tensorboardX import SummaryWriter from torch.utils.data import Dataset,", "- size (B,) - This denotes the `i` used in all equations. -", "document must be the first of the N indices. 
This is used to", "monotonically increasing, so no need of that here # and no -dist also,", "two x = table[i].repeat(len(table)).reshape([len(table), len(table[i])]) # N, D mask = torch.tensor([0.0] * len(table))", "- Ks : - long tensor - size (B, N) - This denotes", "import matplotlib.pyplot as plt plt.style.use(\"ggplot\") def arcosh(x): return torch.log(x + torch.sqrt(x ** 2", "mask = mask * -10000.0 dists = lorentz_scalar_product(x, table) + mask dists =", "long tensor - size (B, N) - This denotes at max `N` documents", "init_range) # equation 6 with torch.no_grad(): self.table.weight[0] = 5 # padding idx push", "return torch.log(x + torch.sqrt(x ** 2 - 1)) def lorentz_scalar_product(x, y): # BD,", "= mask * -10000.0 dists = lorentz_scalar_product(x, table) + mask dists = (", "x + torch.sinh(tn) * (v / tn) result = torch.where(tn_expand > 0, result,", "to the given `i` document. \"\"\" n_ks = Ks.size()[1] ui = torch.stack([self.table(I)] *", "= self.table.weight.data.cpu().numpy() return table[:, 1:] / ( table[:, :1] + 1 ) #", "= torch.tensor([0.0] * len(table)) mask[i] = 1 mask = mask * -10000.0 dists", "N) loss = -(dists[:, 0] - torch.log(torch.exp(dists).sum(dim=1) + 1e-6)) return loss def lorentz_to_poincare(self):", "grad_norm, torch.tensor(1.0).to(p.device)) # only normalize if global grad_norm is more than 1 h", "D = ui.size() ui = ui.reshape(B * N, D) uks = uks.reshape(B *", "= pairwise_matrix self.n_items = pairwise_matrix.shape[0] self.sample_size = sample_size self.arange = np.arange(0, self.n_items) self.cnter", "m = x * y result = m[:, 1:].sum(dim=1) - m[:, 0] return", "device=p.device, dtype=p.dtype) gl[0, 0] = -1 grad_norm = torch.norm(p.grad.data) grad_norm = torch.where(grad_norm >", "Using the pairwise similarity matrix, generate the following inputs and provide to this", "= -lorentz_scalar_product(ui, uks) dists = torch.where(dists <= 1, torch.ones_like(dists) + 1e-6, dists) #", "= nn.Embedding(n_items + 1, dim, padding_idx=0) 
nn.init.uniform_(self.table.weight, -init_range, init_range) # equation 6 with", "-(dists[:, 0] - torch.log(torch.exp(dists).sum(dim=1) + 1e-6)) return loss def lorentz_to_poincare(self): table = self.table.weight.data.cpu().numpy()", "from tqdm import trange, tqdm from collections import Counter from datetime import datetime", "calculation B, N, D = ui.size() ui = ui.reshape(B * N, D) uks", "m[:, 0] return result def tangent_norm(x): # BD -> B return torch.sqrt(lorentz_scalar_product(x, x))", "size (B,) - Ranking loss calculated using document to the given `i` document.", "but if # you try to derive it you get a square term", "arange = self.arange if has_parent: # if no child go for parent valid_idxs", "0] return result def tangent_norm(x): # BD -> B return torch.sqrt(lorentz_scalar_product(x, x)) def", "sample. - The `j` document must be the first of the N indices.", "used in all equations. - Ks : - long tensor - size (B,", "return torch.sqrt(lorentz_scalar_product(x, x)) def exp_map(x, v): # BD, BD -> BD tn =", "** 2).sum(dim=1)) x[:, 0] = dim0 return x # ========================= models class RSGD(optim.Optimizer):", "at max `N` documents which come from the nearest neighbor sample. 
- The", "tqdm import trange, tqdm from collections import Counter from datetime import datetime from", "nothing but l(x,y) # print(dists) predicted_parent = np.argmax(dists) actual_parent = np.argmax(pair_mat[:, i]) #", "p.data.copy_(update) class Lorentz(nn.Module): \"\"\" This will embed `n_items` in a `dim` dimensional lorentz", "has_parent: # if no child go for parent valid_idxs = arange[self.pairwise_matrix[arange, i].nonzero()[0]] j", "- long tensor - size (B, N) - This denotes at max `N`", "= lorentz_scalar_product(x, x) + 1.0 return check.cpu().numpy().sum() class Graph(Dataset): def __init__(self, pairwise_matrix, batch_size,", "import pickle import matplotlib matplotlib.use(\"Agg\") # this needs to come before other matplotlib", "indices = indices[(self.pairwise_matrix[indices, i] < min).nonzero()[1]] indices = indices[: self.sample_size] #print(indices) #raise NotImplementedError()", "pair_mat): \"Reconstruction accuracy\" count = 0 table = torch.tensor(table[1:]) n = pair_mat.shape[0] for", "self.table.weight.data.cpu().numpy() def _test_table(self): x = self.table.weight.data check = lorentz_scalar_product(x, x) + 1.0 return", "come before other matplotlib imports import matplotlib.pyplot as plt plt.style.use(\"ggplot\") def arcosh(x): return", "self.sample_size ] # print(I, Ks) return I, torch.Tensor(Ks).long() def recon(table, pair_mat): \"Reconstruction accuracy\"", "= np.concatenate([[j], indices, np.zeros(self.sample_size)])[ : self.sample_size ] # print(I, Ks) return I, torch.Tensor(Ks).long()", "# this needs to come before other matplotlib imports import matplotlib.pyplot as plt", "those two x = table[i].repeat(len(table)).reshape([len(table), len(table[i])]) # N, D mask = torch.tensor([0.0] *", "len(table[i])]) # N, D mask = torch.tensor([0.0] * len(table)) mask[i] = 1 mask", "n = pair_mat.shape[0] for i in range(1, n): # 0 padding, 1 root,", "np.argmax(dists) actual_parent = np.argmax(pair_mat[:, i]) # print(predicted_parent, actual_parent, i, 
end=\"\\n\\n\") count += actual_parent", "max `N` documents which come from the nearest neighbor sample. - The `j`", "is monotonically increasing, so no need of that here # and no -dist", "no -dist also, as acosh in m i, -acosh(-l(x,y)) is nothing but l(x,y)", "result = torch.where(tn_expand > 0, result, x) # only update if tangent norm", "only update if tangent norm is > 0 return result def set_dim0(x): x", "gl = torch.eye(D, device=p.device, dtype=p.dtype) gl[0, 0] = -1 grad_norm = torch.norm(p.grad.data) grad_norm", "-group[\"learning_rate\"] * proj) is_nan_inf = torch.isnan(update) | torch.isinf(update) update = torch.where(is_nan_inf, p, update)", "explode # NOTE: the paper does not mention the square part of the", ":1] + 1 ) # diffeomorphism transform to poincare ball def get_lorentz_table(self): return", "# when calculating the lorenrz inner product, # -1 can become -0.99(no idea!),", "dim0 return x # ========================= models class RSGD(optim.Optimizer): def __init__(self, params, learning_rate=None): learning_rate", "matplotlib imports import matplotlib.pyplot as plt plt.style.use(\"ggplot\") def arcosh(x): return torch.log(x + torch.sqrt(x", "does not mention the square part of the equation but if # you", "x = torch.renorm(x, p=2, dim=0, maxnorm=1e2) # otherwise leaves will explode # NOTE:", "- This denotes at max `N` documents which come from the nearest neighbor", "child\") indices = arange indices = indices[indices != i] if has_child: indices =", "The `j` document must be the first of the N indices. 
This is", "D = p.size() gl = torch.eye(D, device=p.device, dtype=p.dtype) gl[0, 0] = -1 grad_norm", "dim0 = torch.sqrt(1 + (x[:, 1:] ** 2).sum(dim=1)) x[:, 0] = dim0 return", "arange].nonzero()[1]] j = valid_idxs[0] min = self.pairwise_matrix[i,j] else: raise Exception(f\"Node {i} has no", ": - long tensor - size (B,) - This denotes the `i` used", "i]) # print(predicted_parent, actual_parent, i, end=\"\\n\\n\") count += actual_parent == predicted_parent count =", "sample_size=10): self.pairwise_matrix = pairwise_matrix self.n_items = pairwise_matrix.shape[0] self.sample_size = sample_size self.arange = np.arange(0,", "from the nearest neighbor sample. - The `j` document must be the first", "torch.log(torch.exp(dists).sum(dim=1) + 1e-6)) return loss def lorentz_to_poincare(self): table = self.table.weight.data.cpu().numpy() return table[:, 1:]", "torch.sinh(tn) * (v / tn) result = torch.where(tn_expand > 0, result, x) #", "space. \"\"\" def __init__(self, n_items, dim, init_range=0.001): super().__init__() self.n_items = n_items self.dim =", "0 padding, 1 root, we leave those two x = table[i].repeat(len(table)).reshape([len(table), len(table[i])]) #", "result def set_dim0(x): x = torch.renorm(x, p=2, dim=0, maxnorm=1e2) # otherwise leaves will", "BD, BD -> BD tn = tangent_norm(v).unsqueeze(dim=1) tn_expand = tn.repeat(1, x.size()[-1]) result =", "return self.n_items def __getitem__(self, i): self.cnter = (self.cnter + 1) % self.batch_size I", "update = set_dim0(update) p.data.copy_(update) class Lorentz(nn.Module): \"\"\" This will embed `n_items` in a", "# ---------- turn back to per-sample shape dists = dists.reshape(B, N) loss =", "+ 1) % self.batch_size I = torch.Tensor([i + 1]).squeeze().long() has_child = (self.pairwise_matrix[i] >", "come very close in R^D. 
# when calculating the lorenrz inner product, #", "= torch.where(is_nan_inf, p, update) update[0, :] = p[0, :] # no ❤️ for", "not mention the square part of the equation but if # you try", "self.batch_size = batch_size def __len__(self): return self.n_items def __getitem__(self, i): self.cnter = (self.cnter", "= torch.where(grad_norm > 1, grad_norm, torch.tensor(1.0).to(p.device)) # only normalize if global grad_norm is", "I, Ks): \"\"\" Using the pairwise similarity matrix, generate the following inputs and", "turn back to per-sample shape dists = dists.reshape(B, N) loss = -(dists[:, 0]", ": self.sample_size ] # print(I, Ks) return I, torch.Tensor(Ks).long() def recon(table, pair_mat): \"Reconstruction", "recon(table, pair_mat): \"Reconstruction accuracy\" count = 0 table = torch.tensor(table[1:]) n = pair_mat.shape[0]", "= 0 self.batch_size = batch_size def __len__(self): return self.n_items def __getitem__(self, i): self.cnter", "def recon(table, pair_mat): \"Reconstruction accuracy\" count = 0 table = torch.tensor(table[1:]) n =", "import matplotlib matplotlib.use(\"Agg\") # this needs to come before other matplotlib imports import", "m i, -acosh(-l(x,y)) is nothing but l(x,y) # print(dists) predicted_parent = np.argmax(dists) actual_parent", "then arcosh will become nan dists = -arcosh(dists) # print(dists) # ---------- turn", "\"\"\" Using the pairwise similarity matrix, generate the following inputs and provide to", "pairwise_matrix.shape[0] self.sample_size = sample_size self.arange = np.arange(0, self.n_items) self.cnter = 0 self.batch_size =", "self.cnter = 0 self.batch_size = batch_size def __len__(self): return self.n_items def __getitem__(self, i):", "(self.pairwise_matrix[:, i] > 0).sum() if self.cnter == 0: arange = np.random.permutation(self.arange) else: arange", "i, end=\"\\n\\n\") count += actual_parent == predicted_parent count = count / (pair_mat.shape[0] -", "5 # padding idx push it to corner set_dim0(self.table.weight) def forward(self, I, 
Ks):", "following inputs and provide to this function. Inputs: - I : - long", "# -1 can become -0.99(no idea!), then arcosh will become nan dists =", "uks) dists = torch.where(dists <= 1, torch.ones_like(dists) + 1e-6, dists) # sometimes 2", "neighbor sample. - The `j` document must be the first of the N", "self.sample_size = sample_size self.arange = np.arange(0, self.n_items) self.cnter = 0 self.batch_size = batch_size", "return self.table.weight.data.cpu().numpy() def _test_table(self): x = self.table.weight.data check = lorentz_scalar_product(x, x) + 1.0", "else: arange = self.arange if has_parent: # if no child go for parent", "come from the nearest neighbor sample. - The `j` document must be the", "indices[(self.pairwise_matrix[indices, i] < min).nonzero()[1]] indices = indices[: self.sample_size] #print(indices) #raise NotImplementedError() Ks =", "`dim` dimensional lorentz space. \"\"\" def __init__(self, n_items, dim, init_range=0.001): super().__init__() self.n_items =", "actual_parent = np.argmax(pair_mat[:, i]) # print(predicted_parent, actual_parent, i, end=\"\\n\\n\") count += actual_parent ==", "torch.where(dists <= 1, torch.ones_like(dists) + 1e-6, dists) # sometimes 2 embedding can come", "torch import nn from torch import optim from tqdm import trange, tqdm from", "0.01 defaults = {\"learning_rate\": learning_rate} super().__init__(params, defaults=defaults) def step(self): for group in self.param_groups:", "try to derive it you get a square term in the equation dim0", "arange[self.pairwise_matrix[arange, i].nonzero()[0]] j = valid_idxs[0] min = self.pairwise_matrix[j,i] elif has_child: valid_idxs = arange[self.pairwise_matrix[i,", "first of the N indices. 
This is used to calculate the losses Return:", "0: arange = np.random.permutation(self.arange) else: arange = self.arange if has_parent: # if no", "# only normalize if global grad_norm is more than 1 h = (p.grad.data", "(B, N) - This denotes at max `N` documents which come from the", "= torch.cosh(tn) * x + torch.sinh(tn) * (v / tn) result = torch.where(tn_expand", "plt.style.use(\"ggplot\") def arcosh(x): return torch.log(x + torch.sqrt(x ** 2 - 1)) def lorentz_scalar_product(x,", "SummaryWriter from torch.utils.data import Dataset, DataLoader import datasets import pickle import matplotlib matplotlib.use(\"Agg\")", "to derive it you get a square term in the equation dim0 =", "we leave those two x = table[i].repeat(len(table)).reshape([len(table), len(table[i])]) # N, D mask =", "= torch.where(tn_expand > 0, result, x) # only update if tangent norm is", "+ 1 ) # diffeomorphism transform to poincare ball def get_lorentz_table(self): return self.table.weight.data.cpu().numpy()", "= self.pairwise_matrix[i,j] else: raise Exception(f\"Node {i} has no parent and no child\") indices", "lorentz_scalar_product(p, p) ).unsqueeze(1) * p ) # print(p, lorentz_scalar_product(p, p)) update = exp_map(p,", "> 0).sum() if self.cnter == 0: arange = np.random.permutation(self.arange) else: arange = self.arange", "-> BD tn = tangent_norm(v).unsqueeze(dim=1) tn_expand = tn.repeat(1, x.size()[-1]) result = torch.cosh(tn) *", "only normalize if global grad_norm is more than 1 h = (p.grad.data /", "lorenrz inner product, # -1 can become -0.99(no idea!), then arcosh will become", "equation 6 with torch.no_grad(): self.table.weight[0] = 5 # padding idx push it to", "matplotlib matplotlib.use(\"Agg\") # this needs to come before other matplotlib imports import matplotlib.pyplot", "tqdm from collections import Counter from datetime import datetime from tensorboardX import SummaryWriter", "self.arange = np.arange(0, self.n_items) self.cnter = 0 self.batch_size = batch_size def __len__(self): 
return", "n): # 0 padding, 1 root, we leave those two x = table[i].repeat(len(table)).reshape([len(table),", "self.n_items = pairwise_matrix.shape[0] self.sample_size = sample_size self.arange = np.arange(0, self.n_items) self.cnter = 0", "Ks : - long tensor - size (B, N) - This denotes at", "nn.Embedding(n_items + 1, dim, padding_idx=0) nn.init.uniform_(self.table.weight, -init_range, init_range) # equation 6 with torch.no_grad():", "/ ( table[:, :1] + 1 ) # diffeomorphism transform to poincare ball", "valid_idxs = arange[self.pairwise_matrix[i, arange].nonzero()[1]] j = valid_idxs[0] min = self.pairwise_matrix[i,j] else: raise Exception(f\"Node", "arange = np.random.permutation(self.arange) else: arange = self.arange if has_parent: # if no child", "ui = torch.stack([self.table(I)] * n_ks, dim=1) uks = self.table(Ks) # ---------- reshape for", "tn) result = torch.where(tn_expand > 0, result, x) # only update if tangent", "close in R^D. # when calculating the lorenrz inner product, # -1 can", "dim=0, maxnorm=1e2) # otherwise leaves will explode # NOTE: the paper does not", "1 h = (p.grad.data / grad_norm) @ gl proj = ( h -", "forward(self, I, Ks): \"\"\" Using the pairwise similarity matrix, generate the following inputs", "# N, D mask = torch.tensor([0.0] * len(table)) mask[i] = 1 mask =", "1 root, we leave those two x = table[i].repeat(len(table)).reshape([len(table), len(table[i])]) # N, D", "self.cnter = (self.cnter + 1) % self.batch_size I = torch.Tensor([i + 1]).squeeze().long() has_child", "in range(1, n): # 0 padding, 1 root, we leave those two x", "dists = ( dists.cpu().numpy() ) # arccosh is monotonically increasing, so no need", "{\"learning_rate\": learning_rate} super().__init__(params, defaults=defaults) def step(self): for group in self.param_groups: for p in", "-init_range, init_range) # equation 6 with torch.no_grad(): self.table.weight[0] = 5 # padding idx", "# NOTE: the paper does not mention the square part of the equation", "square term in the 
equation dim0 = torch.sqrt(1 + (x[:, 1:] ** 2).sum(dim=1))", "# ========================= models class RSGD(optim.Optimizer): def __init__(self, params, learning_rate=None): learning_rate = learning_rate if", "os import sys import torch import random import numpy as np from torch", "( h - ( lorentz_scalar_product(p, h) / lorentz_scalar_product(p, p) ).unsqueeze(1) * p )", "sometimes 2 embedding can come very close in R^D. # when calculating the", "diffeomorphism transform to poincare ball def get_lorentz_table(self): return self.table.weight.data.cpu().numpy() def _test_table(self): x =", "that here # and no -dist also, as acosh in m i, -acosh(-l(x,y))", "arange[self.pairwise_matrix[i, arange].nonzero()[1]] j = valid_idxs[0] min = self.pairwise_matrix[i,j] else: raise Exception(f\"Node {i} has", "otherwise leaves will explode # NOTE: the paper does not mention the square", "in m i, -acosh(-l(x,y)) is nothing but l(x,y) # print(dists) predicted_parent = np.argmax(dists)", "defaults = {\"learning_rate\": learning_rate} super().__init__(params, defaults=defaults) def step(self): for group in self.param_groups: for", "= dim0 return x # ========================= models class RSGD(optim.Optimizer): def __init__(self, params, learning_rate=None):", "def __getitem__(self, i): self.cnter = (self.cnter + 1) % self.batch_size I = torch.Tensor([i", "< min).nonzero()[1]] indices = indices[: self.sample_size] #print(indices) #raise NotImplementedError() Ks = np.concatenate([[j], indices,", "import os import sys import torch import random import numpy as np from", "i].nonzero()[0]] j = valid_idxs[0] min = self.pairwise_matrix[j,i] elif has_child: valid_idxs = arange[self.pairwise_matrix[i, arange].nonzero()[1]]", "__len__(self): return self.n_items def __getitem__(self, i): self.cnter = (self.cnter + 1) % self.batch_size", "+ torch.sinh(tn) * (v / tn) result = torch.where(tn_expand > 0, result, x)", "# and no -dist also, as acosh in m i, -acosh(-l(x,y)) is nothing", "print(p, 
lorentz_scalar_product(p, p)) update = exp_map(p, -group[\"learning_rate\"] * proj) is_nan_inf = torch.isnan(update) |", "class Graph(Dataset): def __init__(self, pairwise_matrix, batch_size, sample_size=10): self.pairwise_matrix = pairwise_matrix self.n_items = pairwise_matrix.shape[0]", "+= actual_parent == predicted_parent count = count / (pair_mat.shape[0] - 1) * 100", "D) dists = -lorentz_scalar_product(ui, uks) dists = torch.where(dists <= 1, torch.ones_like(dists) + 1e-6,", "( dists.cpu().numpy() ) # arccosh is monotonically increasing, so no need of that", "if learning_rate is not None else 0.01 defaults = {\"learning_rate\": learning_rate} super().__init__(params, defaults=defaults)", "leaves will explode # NOTE: the paper does not mention the square part", "+ 1e-6, dists) # sometimes 2 embedding can come very close in R^D.", "= set_dim0(update) p.data.copy_(update) class Lorentz(nn.Module): \"\"\" This will embed `n_items` in a `dim`", "parent valid_idxs = arange[self.pairwise_matrix[arange, i].nonzero()[0]] j = valid_idxs[0] min = self.pairwise_matrix[j,i] elif has_child:", "(self.cnter + 1) % self.batch_size I = torch.Tensor([i + 1]).squeeze().long() has_child = (self.pairwise_matrix[i]", "lorentz space. 
\"\"\" def __init__(self, n_items, dim, init_range=0.001): super().__init__() self.n_items = n_items self.dim", "not None else 0.01 defaults = {\"learning_rate\": learning_rate} super().__init__(params, defaults=defaults) def step(self): for", "I : - long tensor - size (B,) - This denotes the `i`", "check.cpu().numpy().sum() class Graph(Dataset): def __init__(self, pairwise_matrix, batch_size, sample_size=10): self.pairwise_matrix = pairwise_matrix self.n_items =", "1, grad_norm, torch.tensor(1.0).to(p.device)) # only normalize if global grad_norm is more than 1", "valid_idxs[0] min = self.pairwise_matrix[j,i] elif has_child: valid_idxs = arange[self.pairwise_matrix[i, arange].nonzero()[1]] j = valid_idxs[0]", "# print(predicted_parent, actual_parent, i, end=\"\\n\\n\") count += actual_parent == predicted_parent count = count", "range(1, n): # 0 padding, 1 root, we leave those two x =", "tensor - size (B,) - This denotes the `i` used in all equations.", "can come very close in R^D. # when calculating the lorenrz inner product,", "= indices[(self.pairwise_matrix[i,indices] < min).nonzero()[0]] else: indices = indices[(self.pairwise_matrix[indices, i] < min).nonzero()[1]] indices =", "torch.norm(p.grad.data) grad_norm = torch.where(grad_norm > 1, grad_norm, torch.tensor(1.0).to(p.device)) # only normalize if global", "indices = indices[indices != i] if has_child: indices = indices[(self.pairwise_matrix[i,indices] < min).nonzero()[0]] else:", "datetime from tensorboardX import SummaryWriter from torch.utils.data import Dataset, DataLoader import datasets import", "will become nan dists = -arcosh(dists) # print(dists) # ---------- turn back to", "< min).nonzero()[0]] else: indices = indices[(self.pairwise_matrix[indices, i] < min).nonzero()[1]] indices = indices[: self.sample_size]", "np.arange(0, self.n_items) self.cnter = 0 self.batch_size = batch_size def __len__(self): return self.n_items def", "result = m[:, 1:].sum(dim=1) - m[:, 0] return result def 
tangent_norm(x): # BD", "set_dim0(update) p.data.copy_(update) class Lorentz(nn.Module): \"\"\" This will embed `n_items` in a `dim` dimensional", "tensorboardX import SummaryWriter from torch.utils.data import Dataset, DataLoader import datasets import pickle import", "np.random.permutation(self.arange) else: arange = self.arange if has_parent: # if no child go for", "B return torch.sqrt(lorentz_scalar_product(x, x)) def exp_map(x, v): # BD, BD -> BD tn", "import torch import random import numpy as np from torch import nn from", "indices, np.zeros(self.sample_size)])[ : self.sample_size ] # print(I, Ks) return I, torch.Tensor(Ks).long() def recon(table,", "pair_mat.shape[0] for i in range(1, n): # 0 padding, 1 root, we leave", "torch.Tensor(Ks).long() def recon(table, pair_mat): \"Reconstruction accuracy\" count = 0 table = torch.tensor(table[1:]) n", "optim from tqdm import trange, tqdm from collections import Counter from datetime import", "1]).squeeze().long() has_child = (self.pairwise_matrix[i] > 0).sum() has_parent = (self.pairwise_matrix[:, i] > 0).sum() if", "def tangent_norm(x): # BD -> B return torch.sqrt(lorentz_scalar_product(x, x)) def exp_map(x, v): #", "-1 can become -0.99(no idea!), then arcosh will become nan dists = -arcosh(dists)", "= dists.reshape(B, N) loss = -(dists[:, 0] - torch.log(torch.exp(dists).sum(dim=1) + 1e-6)) return loss", "it to corner set_dim0(self.table.weight) def forward(self, I, Ks): \"\"\" Using the pairwise similarity", "m[:, 1:].sum(dim=1) - m[:, 0] return result def tangent_norm(x): # BD -> B", "BD -> BD tn = tangent_norm(v).unsqueeze(dim=1) tn_expand = tn.repeat(1, x.size()[-1]) result = torch.cosh(tn)", "p.size() gl = torch.eye(D, device=p.device, dtype=p.dtype) gl[0, 0] = -1 grad_norm = torch.norm(p.grad.data)", "embedding update = set_dim0(update) p.data.copy_(update) class Lorentz(nn.Module): \"\"\" This will embed `n_items` in", "with torch.no_grad(): self.table.weight[0] = 5 # padding idx push it to corner 
set_dim0(self.table.weight)", "has_child = (self.pairwise_matrix[i] > 0).sum() has_parent = (self.pairwise_matrix[:, i] > 0).sum() if self.cnter", "= p.size() gl = torch.eye(D, device=p.device, dtype=p.dtype) gl[0, 0] = -1 grad_norm =", "torch.sqrt(x ** 2 - 1)) def lorentz_scalar_product(x, y): # BD, BD -> B", "= exp_map(p, -group[\"learning_rate\"] * proj) is_nan_inf = torch.isnan(update) | torch.isinf(update) update = torch.where(is_nan_inf,", "= -(dists[:, 0] - torch.log(torch.exp(dists).sum(dim=1) + 1e-6)) return loss def lorentz_to_poincare(self): table =", "-10000.0 dists = lorentz_scalar_product(x, table) + mask dists = ( dists.cpu().numpy() ) #", "norm is > 0 return result def set_dim0(x): x = torch.renorm(x, p=2, dim=0,", "is None: continue B, D = p.size() gl = torch.eye(D, device=p.device, dtype=p.dtype) gl[0,", "ui.reshape(B * N, D) uks = uks.reshape(B * N, D) dists = -lorentz_scalar_product(ui,", "0] = -1 grad_norm = torch.norm(p.grad.data) grad_norm = torch.where(grad_norm > 1, grad_norm, torch.tensor(1.0).to(p.device))", "( table[:, :1] + 1 ) # diffeomorphism transform to poincare ball def", "❤️ for embedding update = set_dim0(update) p.data.copy_(update) class Lorentz(nn.Module): \"\"\" This will embed", "predicted_parent = np.argmax(dists) actual_parent = np.argmax(pair_mat[:, i]) # print(predicted_parent, actual_parent, i, end=\"\\n\\n\") count", "= torch.sqrt(1 + (x[:, 1:] ** 2).sum(dim=1)) x[:, 0] = dim0 return x", "else: indices = indices[(self.pairwise_matrix[indices, i] < min).nonzero()[1]] indices = indices[: self.sample_size] #print(indices) #raise", "exp_map(p, -group[\"learning_rate\"] * proj) is_nan_inf = torch.isnan(update) | torch.isinf(update) update = torch.where(is_nan_inf, p,", "# if no child go for parent valid_idxs = arange[self.pairwise_matrix[arange, i].nonzero()[0]] j =", "Dataset, DataLoader import datasets import pickle import matplotlib matplotlib.use(\"Agg\") # this needs to", "leave those two x = 
table[i].repeat(len(table)).reshape([len(table), len(table[i])]) # N, D mask = torch.tensor([0.0]", "indices = indices[(self.pairwise_matrix[i,indices] < min).nonzero()[0]] else: indices = indices[(self.pairwise_matrix[indices, i] < min).nonzero()[1]] indices", ") # diffeomorphism transform to poincare ball def get_lorentz_table(self): return self.table.weight.data.cpu().numpy() def _test_table(self):", "# print(dists) predicted_parent = np.argmax(dists) actual_parent = np.argmax(pair_mat[:, i]) # print(predicted_parent, actual_parent, i,", "Ks) return I, torch.Tensor(Ks).long() def recon(table, pair_mat): \"Reconstruction accuracy\" count = 0 table", "the equation dim0 = torch.sqrt(1 + (x[:, 1:] ** 2).sum(dim=1)) x[:, 0] =", "torch.isnan(update) | torch.isinf(update) update = torch.where(is_nan_inf, p, update) update[0, :] = p[0, :]", "__init__(self, pairwise_matrix, batch_size, sample_size=10): self.pairwise_matrix = pairwise_matrix self.n_items = pairwise_matrix.shape[0] self.sample_size = sample_size", "transform to poincare ball def get_lorentz_table(self): return self.table.weight.data.cpu().numpy() def _test_table(self): x = self.table.weight.data", "nearest neighbor sample. 
- The `j` document must be the first of the", "idea!), then arcosh will become nan dists = -arcosh(dists) # print(dists) # ----------", "random import numpy as np from torch import nn from torch import optim", "padding_idx=0) nn.init.uniform_(self.table.weight, -init_range, init_range) # equation 6 with torch.no_grad(): self.table.weight[0] = 5 #", "# 0 padding, 1 root, we leave those two x = table[i].repeat(len(table)).reshape([len(table), len(table[i])])", "dtype=p.dtype) gl[0, 0] = -1 grad_norm = torch.norm(p.grad.data) grad_norm = torch.where(grad_norm > 1,", "import optim from tqdm import trange, tqdm from collections import Counter from datetime", "N, D) uks = uks.reshape(B * N, D) dists = -lorentz_scalar_product(ui, uks) dists", "* -10000.0 dists = lorentz_scalar_product(x, table) + mask dists = ( dists.cpu().numpy() )", "1)) def lorentz_scalar_product(x, y): # BD, BD -> B m = x *", "if no child go for parent valid_idxs = arange[self.pairwise_matrix[arange, i].nonzero()[0]] j = valid_idxs[0]", ":] = p[0, :] # no ❤️ for embedding update = set_dim0(update) p.data.copy_(update)", "init_range=0.001): super().__init__() self.n_items = n_items self.dim = dim self.table = nn.Embedding(n_items + 1,", "loss def lorentz_to_poincare(self): table = self.table.weight.data.cpu().numpy() return table[:, 1:] / ( table[:, :1]", "self.n_items) self.cnter = 0 self.batch_size = batch_size def __len__(self): return self.n_items def __getitem__(self,", "/ grad_norm) @ gl proj = ( h - ( lorentz_scalar_product(p, h) /", "n_items self.dim = dim self.table = nn.Embedding(n_items + 1, dim, padding_idx=0) nn.init.uniform_(self.table.weight, -init_range,", "= pairwise_matrix.shape[0] self.sample_size = sample_size self.arange = np.arange(0, self.n_items) self.cnter = 0 self.batch_size", "torch.sqrt(lorentz_scalar_product(x, x)) def exp_map(x, v): # BD, BD -> BD tn = tangent_norm(v).unsqueeze(dim=1)", "losses Return: - size (B,) - Ranking loss calculated using document to the", "gl[0, 0] 
= -1 grad_norm = torch.norm(p.grad.data) grad_norm = torch.where(grad_norm > 1, grad_norm,", "in the equation dim0 = torch.sqrt(1 + (x[:, 1:] ** 2).sum(dim=1)) x[:, 0]", "ui = ui.reshape(B * N, D) uks = uks.reshape(B * N, D) dists", "can become -0.99(no idea!), then arcosh will become nan dists = -arcosh(dists) #", "pickle import matplotlib matplotlib.use(\"Agg\") # this needs to come before other matplotlib imports", "indices[(self.pairwise_matrix[i,indices] < min).nonzero()[0]] else: indices = indices[(self.pairwise_matrix[indices, i] < min).nonzero()[1]] indices = indices[:", "R^D. # when calculating the lorenrz inner product, # -1 can become -0.99(no", "valid_idxs[0] min = self.pairwise_matrix[i,j] else: raise Exception(f\"Node {i} has no parent and no", "- I : - long tensor - size (B,) - This denotes the", "> 0).sum() has_parent = (self.pairwise_matrix[:, i] > 0).sum() if self.cnter == 0: arange", "@ gl proj = ( h - ( lorentz_scalar_product(p, h) / lorentz_scalar_product(p, p)", "dists = torch.where(dists <= 1, torch.ones_like(dists) + 1e-6, dists) # sometimes 2 embedding", "return loss def lorentz_to_poincare(self): table = self.table.weight.data.cpu().numpy() return table[:, 1:] / ( table[:,", "parent and no child\") indices = arange indices = indices[indices != i] if", "N, D mask = torch.tensor([0.0] * len(table)) mask[i] = 1 mask = mask", "size (B,) - This denotes the `i` used in all equations. - Ks", "np from torch import nn from torch import optim from tqdm import trange,", "(x[:, 1:] ** 2).sum(dim=1)) x[:, 0] = dim0 return x # ========================= models", "uks = self.table(Ks) # ---------- reshape for calculation B, N, D = ui.size()", "to this function. 
Inputs: - I : - long tensor - size (B,)", "p)) update = exp_map(p, -group[\"learning_rate\"] * proj) is_nan_inf = torch.isnan(update) | torch.isinf(update) update", "2).sum(dim=1)) x[:, 0] = dim0 return x # ========================= models class RSGD(optim.Optimizer): def", "torch.sqrt(1 + (x[:, 1:] ** 2).sum(dim=1)) x[:, 0] = dim0 return x #", "`n_items` in a `dim` dimensional lorentz space. \"\"\" def __init__(self, n_items, dim, init_range=0.001):", "n_items, dim, init_range=0.001): super().__init__() self.n_items = n_items self.dim = dim self.table = nn.Embedding(n_items", "derive it you get a square term in the equation dim0 = torch.sqrt(1", "update[0, :] = p[0, :] # no ❤️ for embedding update = set_dim0(update)", "this needs to come before other matplotlib imports import matplotlib.pyplot as plt plt.style.use(\"ggplot\")", "dimensional lorentz space. \"\"\" def __init__(self, n_items, dim, init_range=0.001): super().__init__() self.n_items = n_items", "be the first of the N indices. 
This is used to calculate the", "x)) def exp_map(x, v): # BD, BD -> BD tn = tangent_norm(v).unsqueeze(dim=1) tn_expand", "to calculate the losses Return: - size (B,) - Ranking loss calculated using", "lorentz_scalar_product(x, x) + 1.0 return check.cpu().numpy().sum() class Graph(Dataset): def __init__(self, pairwise_matrix, batch_size, sample_size=10):", "RSGD(optim.Optimizer): def __init__(self, params, learning_rate=None): learning_rate = learning_rate if learning_rate is not None", "you get a square term in the equation dim0 = torch.sqrt(1 + (x[:,", "equation but if # you try to derive it you get a square", "dim=1) uks = self.table(Ks) # ---------- reshape for calculation B, N, D =", "exp_map(x, v): # BD, BD -> BD tn = tangent_norm(v).unsqueeze(dim=1) tn_expand = tn.repeat(1,", "torch.tensor(1.0).to(p.device)) # only normalize if global grad_norm is more than 1 h =", "---------- reshape for calculation B, N, D = ui.size() ui = ui.reshape(B *", "collections import Counter from datetime import datetime from tensorboardX import SummaryWriter from torch.utils.data", "+ mask dists = ( dists.cpu().numpy() ) # arccosh is monotonically increasing, so", "numpy as np from torch import nn from torch import optim from tqdm", "part of the equation but if # you try to derive it you", "> 0 return result def set_dim0(x): x = torch.renorm(x, p=2, dim=0, maxnorm=1e2) #", "2 embedding can come very close in R^D. # when calculating the lorenrz", "very close in R^D. # when calculating the lorenrz inner product, # -1", "self.table.weight.data check = lorentz_scalar_product(x, x) + 1.0 return check.cpu().numpy().sum() class Graph(Dataset): def __init__(self,", "root, we leave those two x = table[i].repeat(len(table)).reshape([len(table), len(table[i])]) # N, D mask", "you try to derive it you get a square term in the equation", "self.param_groups: for p in group[\"params\"]: if p.grad is None: continue B, D =", "- This denotes the `i` used in all equations. 
- Ks : -", "proj = ( h - ( lorentz_scalar_product(p, h) / lorentz_scalar_product(p, p) ).unsqueeze(1) *", "push it to corner set_dim0(self.table.weight) def forward(self, I, Ks): \"\"\" Using the pairwise", "torch.where(is_nan_inf, p, update) update[0, :] = p[0, :] # no ❤️ for embedding", "N, D = ui.size() ui = ui.reshape(B * N, D) uks = uks.reshape(B", "def _test_table(self): x = self.table.weight.data check = lorentz_scalar_product(x, x) + 1.0 return check.cpu().numpy().sum()", "# print(I, Ks) return I, torch.Tensor(Ks).long() def recon(table, pair_mat): \"Reconstruction accuracy\" count =", "paper does not mention the square part of the equation but if #", "This denotes the `i` used in all equations. - Ks : - long", "(B,) - This denotes the `i` used in all equations. - Ks :", "is_nan_inf = torch.isnan(update) | torch.isinf(update) update = torch.where(is_nan_inf, p, update) update[0, :] =", "indices. This is used to calculate the losses Return: - size (B,) -", "defaults=defaults) def step(self): for group in self.param_groups: for p in group[\"params\"]: if p.grad", "1, dim, padding_idx=0) nn.init.uniform_(self.table.weight, -init_range, init_range) # equation 6 with torch.no_grad(): self.table.weight[0] =", "import trange, tqdm from collections import Counter from datetime import datetime from tensorboardX", "documents which come from the nearest neighbor sample. 
- The `j` document must", "# BD -> B return torch.sqrt(lorentz_scalar_product(x, x)) def exp_map(x, v): # BD, BD", "<gh_stars>0 import os import sys import torch import random import numpy as np", "** 2 - 1)) def lorentz_scalar_product(x, y): # BD, BD -> B m", "def set_dim0(x): x = torch.renorm(x, p=2, dim=0, maxnorm=1e2) # otherwise leaves will explode", "NotImplementedError() Ks = np.concatenate([[j], indices, np.zeros(self.sample_size)])[ : self.sample_size ] # print(I, Ks) return", "learning_rate if learning_rate is not None else 0.01 defaults = {\"learning_rate\": learning_rate} super().__init__(params,", "def __init__(self, pairwise_matrix, batch_size, sample_size=10): self.pairwise_matrix = pairwise_matrix self.n_items = pairwise_matrix.shape[0] self.sample_size =", "torch.eye(D, device=p.device, dtype=p.dtype) gl[0, 0] = -1 grad_norm = torch.norm(p.grad.data) grad_norm = torch.where(grad_norm", "as plt plt.style.use(\"ggplot\") def arcosh(x): return torch.log(x + torch.sqrt(x ** 2 - 1))", "__init__(self, n_items, dim, init_range=0.001): super().__init__() self.n_items = n_items self.dim = dim self.table =", "= (p.grad.data / grad_norm) @ gl proj = ( h - ( lorentz_scalar_product(p,", "so no need of that here # and no -dist also, as acosh", "import SummaryWriter from torch.utils.data import Dataset, DataLoader import datasets import pickle import matplotlib", "will embed `n_items` in a `dim` dimensional lorentz space. \"\"\" def __init__(self, n_items,", "provide to this function. 
Inputs: - I : - long tensor - size", "and no -dist also, as acosh in m i, -acosh(-l(x,y)) is nothing but", "p[0, :] # no ❤️ for embedding update = set_dim0(update) p.data.copy_(update) class Lorentz(nn.Module):", "for calculation B, N, D = ui.size() ui = ui.reshape(B * N, D)", "return table[:, 1:] / ( table[:, :1] + 1 ) # diffeomorphism transform", "plt plt.style.use(\"ggplot\") def arcosh(x): return torch.log(x + torch.sqrt(x ** 2 - 1)) def", "normalize if global grad_norm is more than 1 h = (p.grad.data / grad_norm)", "N, D) dists = -lorentz_scalar_product(ui, uks) dists = torch.where(dists <= 1, torch.ones_like(dists) +", "0 self.batch_size = batch_size def __len__(self): return self.n_items def __getitem__(self, i): self.cnter =", "dists.cpu().numpy() ) # arccosh is monotonically increasing, so no need of that here", "become nan dists = -arcosh(dists) # print(dists) # ---------- turn back to per-sample", "generate the following inputs and provide to this function. Inputs: - I :", "(B,) - Ranking loss calculated using document to the given `i` document. 
\"\"\"", "# otherwise leaves will explode # NOTE: the paper does not mention the", "pairwise_matrix self.n_items = pairwise_matrix.shape[0] self.sample_size = sample_size self.arange = np.arange(0, self.n_items) self.cnter =", "import Counter from datetime import datetime from tensorboardX import SummaryWriter from torch.utils.data import", "import Dataset, DataLoader import datasets import pickle import matplotlib matplotlib.use(\"Agg\") # this needs", "0).sum() if self.cnter == 0: arange = np.random.permutation(self.arange) else: arange = self.arange if", "# arccosh is monotonically increasing, so no need of that here # and", "D mask = torch.tensor([0.0] * len(table)) mask[i] = 1 mask = mask *", "n_ks = Ks.size()[1] ui = torch.stack([self.table(I)] * n_ks, dim=1) uks = self.table(Ks) #", "mask[i] = 1 mask = mask * -10000.0 dists = lorentz_scalar_product(x, table) +", "= p[0, :] # no ❤️ for embedding update = set_dim0(update) p.data.copy_(update) class", "arange indices = indices[indices != i] if has_child: indices = indices[(self.pairwise_matrix[i,indices] < min).nonzero()[0]]", "class Lorentz(nn.Module): \"\"\" This will embed `n_items` in a `dim` dimensional lorentz space.", "print(dists) # ---------- turn back to per-sample shape dists = dists.reshape(B, N) loss", "count += actual_parent == predicted_parent count = count / (pair_mat.shape[0] - 1) *", "denotes at max `N` documents which come from the nearest neighbor sample. 
-", "dim self.table = nn.Embedding(n_items + 1, dim, padding_idx=0) nn.init.uniform_(self.table.weight, -init_range, init_range) # equation", "= ui.reshape(B * N, D) uks = uks.reshape(B * N, D) dists =", "for group in self.param_groups: for p in group[\"params\"]: if p.grad is None: continue", "nn.init.uniform_(self.table.weight, -init_range, init_range) # equation 6 with torch.no_grad(): self.table.weight[0] = 5 # padding", "the pairwise similarity matrix, generate the following inputs and provide to this function.", "* y result = m[:, 1:].sum(dim=1) - m[:, 0] return result def tangent_norm(x):", "/ lorentz_scalar_product(p, p) ).unsqueeze(1) * p ) # print(p, lorentz_scalar_product(p, p)) update =", "lorentz_scalar_product(p, p)) update = exp_map(p, -group[\"learning_rate\"] * proj) is_nan_inf = torch.isnan(update) | torch.isinf(update)", "h = (p.grad.data / grad_norm) @ gl proj = ( h - (", "# no ❤️ for embedding update = set_dim0(update) p.data.copy_(update) class Lorentz(nn.Module): \"\"\" This", "ball def get_lorentz_table(self): return self.table.weight.data.cpu().numpy() def _test_table(self): x = self.table.weight.data check = lorentz_scalar_product(x,", "in self.param_groups: for p in group[\"params\"]: if p.grad is None: continue B, D", "= np.random.permutation(self.arange) else: arange = self.arange if has_parent: # if no child go", "= arange[self.pairwise_matrix[i, arange].nonzero()[1]] j = valid_idxs[0] min = self.pairwise_matrix[i,j] else: raise Exception(f\"Node {i}", "`i` used in all equations. 
- Ks : - long tensor - size", "if has_child: indices = indices[(self.pairwise_matrix[i,indices] < min).nonzero()[0]] else: indices = indices[(self.pairwise_matrix[indices, i] <", "check = lorentz_scalar_product(x, x) + 1.0 return check.cpu().numpy().sum() class Graph(Dataset): def __init__(self, pairwise_matrix,", "x) # only update if tangent norm is > 0 return result def", "1) % self.batch_size I = torch.Tensor([i + 1]).squeeze().long() has_child = (self.pairwise_matrix[i] > 0).sum()", "proj) is_nan_inf = torch.isnan(update) | torch.isinf(update) update = torch.where(is_nan_inf, p, update) update[0, :]", "before other matplotlib imports import matplotlib.pyplot as plt plt.style.use(\"ggplot\") def arcosh(x): return torch.log(x", "torch.isinf(update) update = torch.where(is_nan_inf, p, update) update[0, :] = p[0, :] # no", "table = torch.tensor(table[1:]) n = pair_mat.shape[0] for i in range(1, n): # 0", "N) - This denotes at max `N` documents which come from the nearest", "0] - torch.log(torch.exp(dists).sum(dim=1) + 1e-6)) return loss def lorentz_to_poincare(self): table = self.table.weight.data.cpu().numpy() return", "result = torch.cosh(tn) * x + torch.sinh(tn) * (v / tn) result =", "* len(table)) mask[i] = 1 mask = mask * -10000.0 dists = lorentz_scalar_product(x,", "0).sum() has_parent = (self.pairwise_matrix[:, i] > 0).sum() if self.cnter == 0: arange =", "> 0, result, x) # only update if tangent norm is > 0", "# sometimes 2 embedding can come very close in R^D. 
# when calculating", "= m[:, 1:].sum(dim=1) - m[:, 0] return result def tangent_norm(x): # BD ->", "j = valid_idxs[0] min = self.pairwise_matrix[j,i] elif has_child: valid_idxs = arange[self.pairwise_matrix[i, arange].nonzero()[1]] j", "return result def set_dim0(x): x = torch.renorm(x, p=2, dim=0, maxnorm=1e2) # otherwise leaves", "= valid_idxs[0] min = self.pairwise_matrix[i,j] else: raise Exception(f\"Node {i} has no parent and", "i] if has_child: indices = indices[(self.pairwise_matrix[i,indices] < min).nonzero()[0]] else: indices = indices[(self.pairwise_matrix[indices, i]", "= np.argmax(pair_mat[:, i]) # print(predicted_parent, actual_parent, i, end=\"\\n\\n\") count += actual_parent == predicted_parent", "- long tensor - size (B,) - This denotes the `i` used in", "mask = torch.tensor([0.0] * len(table)) mask[i] = 1 mask = mask * -10000.0", "valid_idxs = arange[self.pairwise_matrix[arange, i].nonzero()[0]] j = valid_idxs[0] min = self.pairwise_matrix[j,i] elif has_child: valid_idxs", "used to calculate the losses Return: - size (B,) - Ranking loss calculated", "the equation but if # you try to derive it you get a", "long tensor - size (B,) - This denotes the `i` used in all", "= (self.pairwise_matrix[:, i] > 0).sum() if self.cnter == 0: arange = np.random.permutation(self.arange) else:", "# equation 6 with torch.no_grad(): self.table.weight[0] = 5 # padding idx push it", "* N, D) dists = -lorentz_scalar_product(ui, uks) dists = torch.where(dists <= 1, torch.ones_like(dists)", "get_lorentz_table(self): return self.table.weight.data.cpu().numpy() def _test_table(self): x = self.table.weight.data check = lorentz_scalar_product(x, x) +", "no ❤️ for embedding update = set_dim0(update) p.data.copy_(update) class Lorentz(nn.Module): \"\"\" This will", "# padding idx push it to corner set_dim0(self.table.weight) def forward(self, I, Ks): \"\"\"", "#raise NotImplementedError() Ks = np.concatenate([[j], indices, np.zeros(self.sample_size)])[ : self.sample_size ] # 
print(I, Ks)", "table[:, :1] + 1 ) # diffeomorphism transform to poincare ball def get_lorentz_table(self):", "padding idx push it to corner set_dim0(self.table.weight) def forward(self, I, Ks): \"\"\" Using", "= self.pairwise_matrix[j,i] elif has_child: valid_idxs = arange[self.pairwise_matrix[i, arange].nonzero()[1]] j = valid_idxs[0] min =", "= torch.where(dists <= 1, torch.ones_like(dists) + 1e-6, dists) # sometimes 2 embedding can", "= indices[: self.sample_size] #print(indices) #raise NotImplementedError() Ks = np.concatenate([[j], indices, np.zeros(self.sample_size)])[ : self.sample_size", "per-sample shape dists = dists.reshape(B, N) loss = -(dists[:, 0] - torch.log(torch.exp(dists).sum(dim=1) +", "def forward(self, I, Ks): \"\"\" Using the pairwise similarity matrix, generate the following", "arccosh is monotonically increasing, so no need of that here # and no", "the paper does not mention the square part of the equation but if", "document to the given `i` document. \"\"\" n_ks = Ks.size()[1] ui = torch.stack([self.table(I)]", "self.pairwise_matrix[i,j] else: raise Exception(f\"Node {i} has no parent and no child\") indices =", "i] < min).nonzero()[1]] indices = indices[: self.sample_size] #print(indices) #raise NotImplementedError() Ks = np.concatenate([[j],", "the losses Return: - size (B,) - Ranking loss calculated using document to", "table[i].repeat(len(table)).reshape([len(table), len(table[i])]) # N, D mask = torch.tensor([0.0] * len(table)) mask[i] = 1", "# diffeomorphism transform to poincare ball def get_lorentz_table(self): return self.table.weight.data.cpu().numpy() def _test_table(self): x", "B, N, D = ui.size() ui = ui.reshape(B * N, D) uks =", "= x * y result = m[:, 1:].sum(dim=1) - m[:, 0] return result", "inner product, # -1 can become -0.99(no idea!), then arcosh will become nan", "x[:, 0] = dim0 return x # ========================= models class RSGD(optim.Optimizer): def __init__(self,", "group[\"params\"]: if p.grad is None: continue 
B, D = p.size() gl = torch.eye(D,", "-arcosh(dists) # print(dists) # ---------- turn back to per-sample shape dists = dists.reshape(B,", "= torch.renorm(x, p=2, dim=0, maxnorm=1e2) # otherwise leaves will explode # NOTE: the", "# print(dists) # ---------- turn back to per-sample shape dists = dists.reshape(B, N)", "Graph(Dataset): def __init__(self, pairwise_matrix, batch_size, sample_size=10): self.pairwise_matrix = pairwise_matrix self.n_items = pairwise_matrix.shape[0] self.sample_size", "self.batch_size I = torch.Tensor([i + 1]).squeeze().long() has_child = (self.pairwise_matrix[i] > 0).sum() has_parent =", "nan dists = -arcosh(dists) # print(dists) # ---------- turn back to per-sample shape", "from torch import nn from torch import optim from tqdm import trange, tqdm", "need of that here # and no -dist also, as acosh in m", "the lorenrz inner product, # -1 can become -0.99(no idea!), then arcosh will", "== 0: arange = np.random.permutation(self.arange) else: arange = self.arange if has_parent: # if", "1.0 return check.cpu().numpy().sum() class Graph(Dataset): def __init__(self, pairwise_matrix, batch_size, sample_size=10): self.pairwise_matrix = pairwise_matrix", "shape dists = dists.reshape(B, N) loss = -(dists[:, 0] - torch.log(torch.exp(dists).sum(dim=1) + 1e-6))", "step(self): for group in self.param_groups: for p in group[\"params\"]: if p.grad is None:", "pairwise similarity matrix, generate the following inputs and provide to this function. 
Inputs:", "= tangent_norm(v).unsqueeze(dim=1) tn_expand = tn.repeat(1, x.size()[-1]) result = torch.cosh(tn) * x + torch.sinh(tn)", "no child go for parent valid_idxs = arange[self.pairwise_matrix[arange, i].nonzero()[0]] j = valid_idxs[0] min", "tensor - size (B, N) - This denotes at max `N` documents which", "actual_parent, i, end=\"\\n\\n\") count += actual_parent == predicted_parent count = count / (pair_mat.shape[0]", "size (B, N) - This denotes at max `N` documents which come from", "= np.argmax(dists) actual_parent = np.argmax(pair_mat[:, i]) # print(predicted_parent, actual_parent, i, end=\"\\n\\n\") count +=", "for embedding update = set_dim0(update) p.data.copy_(update) class Lorentz(nn.Module): \"\"\" This will embed `n_items`", "-> B return torch.sqrt(lorentz_scalar_product(x, x)) def exp_map(x, v): # BD, BD -> BD", "update if tangent norm is > 0 return result def set_dim0(x): x =", ").unsqueeze(1) * p ) # print(p, lorentz_scalar_product(p, p)) update = exp_map(p, -group[\"learning_rate\"] *", "\"Reconstruction accuracy\" count = 0 table = torch.tensor(table[1:]) n = pair_mat.shape[0] for i", "1, torch.ones_like(dists) + 1e-6, dists) # sometimes 2 embedding can come very close", "self.pairwise_matrix = pairwise_matrix self.n_items = pairwise_matrix.shape[0] self.sample_size = sample_size self.arange = np.arange(0, self.n_items)", "has no parent and no child\") indices = arange indices = indices[indices !=", "torch.no_grad(): self.table.weight[0] = 5 # padding idx push it to corner set_dim0(self.table.weight) def", "torch.renorm(x, p=2, dim=0, maxnorm=1e2) # otherwise leaves will explode # NOTE: the paper", "This will embed `n_items` in a `dim` dimensional lorentz space. \"\"\" def __init__(self,", "__init__(self, params, learning_rate=None): learning_rate = learning_rate if learning_rate is not None else 0.01", "y): # BD, BD -> B m = x * y result =", "self.table(Ks) # ---------- reshape for calculation B, N, D = ui.size() ui =", "in all equations. 
- Ks : - long tensor - size (B, N)", "0 table = torch.tensor(table[1:]) n = pair_mat.shape[0] for i in range(1, n): #", "of the N indices. This is used to calculate the losses Return: -", "x) + 1.0 return check.cpu().numpy().sum() class Graph(Dataset): def __init__(self, pairwise_matrix, batch_size, sample_size=10): self.pairwise_matrix", "= 0 table = torch.tensor(table[1:]) n = pair_mat.shape[0] for i in range(1, n):", "_test_table(self): x = self.table.weight.data check = lorentz_scalar_product(x, x) + 1.0 return check.cpu().numpy().sum() class", "= indices[(self.pairwise_matrix[indices, i] < min).nonzero()[1]] indices = indices[: self.sample_size] #print(indices) #raise NotImplementedError() Ks", "all equations. - Ks : - long tensor - size (B, N) -", "= torch.norm(p.grad.data) grad_norm = torch.where(grad_norm > 1, grad_norm, torch.tensor(1.0).to(p.device)) # only normalize if", "torch.cosh(tn) * x + torch.sinh(tn) * (v / tn) result = torch.where(tn_expand >", "the following inputs and provide to this function. Inputs: - I : -", "as np from torch import nn from torch import optim from tqdm import", "1:] / ( table[:, :1] + 1 ) # diffeomorphism transform to poincare", "1:].sum(dim=1) - m[:, 0] return result def tangent_norm(x): # BD -> B return", "else 0.01 defaults = {\"learning_rate\": learning_rate} super().__init__(params, defaults=defaults) def step(self): for group in", "loss calculated using document to the given `i` document. 
\"\"\" n_ks = Ks.size()[1]", "get a square term in the equation dim0 = torch.sqrt(1 + (x[:, 1:]", "matplotlib.pyplot as plt plt.style.use(\"ggplot\") def arcosh(x): return torch.log(x + torch.sqrt(x ** 2 -", "the square part of the equation but if # you try to derive", "+ 1]).squeeze().long() has_child = (self.pairwise_matrix[i] > 0).sum() has_parent = (self.pairwise_matrix[:, i] > 0).sum()", "to poincare ball def get_lorentz_table(self): return self.table.weight.data.cpu().numpy() def _test_table(self): x = self.table.weight.data check", "embed `n_items` in a `dim` dimensional lorentz space. \"\"\" def __init__(self, n_items, dim,", "= ( h - ( lorentz_scalar_product(p, h) / lorentz_scalar_product(p, p) ).unsqueeze(1) * p", "= uks.reshape(B * N, D) dists = -lorentz_scalar_product(ui, uks) dists = torch.where(dists <=", "p=2, dim=0, maxnorm=1e2) # otherwise leaves will explode # NOTE: the paper does", "x # ========================= models class RSGD(optim.Optimizer): def __init__(self, params, learning_rate=None): learning_rate = learning_rate", "torch import optim from tqdm import trange, tqdm from collections import Counter from", "to per-sample shape dists = dists.reshape(B, N) loss = -(dists[:, 0] - torch.log(torch.exp(dists).sum(dim=1)", "= arange[self.pairwise_matrix[arange, i].nonzero()[0]] j = valid_idxs[0] min = self.pairwise_matrix[j,i] elif has_child: valid_idxs =", "return check.cpu().numpy().sum() class Graph(Dataset): def __init__(self, pairwise_matrix, batch_size, sample_size=10): self.pairwise_matrix = pairwise_matrix self.n_items", "= learning_rate if learning_rate is not None else 0.01 defaults = {\"learning_rate\": learning_rate}", "-0.99(no idea!), then arcosh will become nan dists = -arcosh(dists) # print(dists) #", "super().__init__() self.n_items = n_items self.dim = dim self.table = nn.Embedding(n_items + 1, dim,", "class RSGD(optim.Optimizer): def __init__(self, params, learning_rate=None): learning_rate = learning_rate if learning_rate is 
not" ]
[ "{\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"lighthouse_ui_read_write\")} @pytest.fixture def centres(app): with app.app_context(): centres_collection = app.data.driver.db.centres _ = centres_collection.insert_many(CENTRES)", "json=cherrytrack_run_info_response, status=cherrytrack_mock_run_info_status, ) yield @pytest.fixture def baracoda_mock_barcodes_group(app, mocked_responses, baracoda_mock_responses, baracoda_mock_status): for centre_prefix in", "with patch(\"lighthouse.classes.services.warehouse.Broker\", return_value=mocked_broker): mocked_channel = MagicMock() mocked_broker.__enter__.return_value = mocked_channel yield mocked_channel @pytest.fixture def", "@pytest.fixture def cherrytrack_mock_destination_plate( app, mocked_responses, destination_barcode, cherrytrack_destination_plate_response, cherrytrack_mock_destination_plate_status, ): destination_plate_url = f\"{app.config['CHERRYTRACK_URL']}/destination-plates/{destination_barcode}\" mocked_responses.add(", "http import HTTPStatus from unittest.mock import MagicMock, patch import pytest import responses from", "of the samples inserted into mongo, currently only uses the number # of", "mongo, currently only uses the number # of priority samples therefore PRIORITY_SAMPLES needs", "used with app.app_context(): source_plates_collection.delete_many({}) @pytest.fixture def plate_events(app): with app.app_context(): events_collection = app.data.driver.db.events inserted_events", "wants yield copy.deepcopy(CENTRES) # clear up after the fixture is used with app.app_context():", "# inserts insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"],", "[ { \"role_type\": \"sample\", \"subject_type\": \"sample\", \"friendly_name\": \"friendly_name\", \"uuid\": 
\"00000000-1111-2222-3333-555555555555\", }, { \"role_type\":", "inserted_events # clear up after the fixture is used with app.app_context(): events_collection.delete_many({}) @pytest.fixture", "}, \"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture def message_source_complete(): message_content: EventMessage = {", "\"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_COMPLETED, \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [ {", "f\"{app.config['CHERRYTRACK_URL']}/source-plates/{source_barcode}\" mocked_responses.add( responses.GET, source_plates_url, json=cherrytrack_source_plates_response, status=cherrytrack_mock_source_plates_status, ) yield @pytest.fixture def cherrytrack_mock_destination_plate( app, mocked_responses,", "Events Warehouse test data\") connection.execute(role_types_table.insert(), EVENT_WH_DATA[\"role_types\"]) connection.execute(event_types_table.insert(), EVENT_WH_DATA[\"event_types\"]) connection.execute(subject_types_table.insert(), EVENT_WH_DATA[\"subject_types\"]) connection.execute(subjects_table.insert(), EVENT_WH_DATA[\"subjects\"]) connection.execute(events_table.insert(),", "EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": \"no_callbacks\", \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\",", "def mlwh_samples_in_cherrytrack(app, source_barcode, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try:", "mocked_broker = MagicMock() with patch(\"lighthouse.classes.services.warehouse.Broker\", return_value=mocked_broker): mocked_channel = MagicMock() mocked_broker.__enter__.return_value = mocked_channel yield", "return build_cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id) def 
cherrytrack_destination_plate_response_duplicated_wells(cherrytrack_destination_plate_response): cherrytrack_destination_plate_response[\"wells\"][0][\"destination_coordinate\"] = \"H12\" return cherrytrack_destination_plate_response @pytest.fixture def", "@pytest.fixture def samples_from_cherrytrack_into_mongo(app, source_barcode): try: samples = rows_for_samples_in_cherrytrack(source_barcode) with app.app_context(): samples_collection = app.data.driver.db.samples", "with event_wh_sql_engine.begin() as connection: connection.execute(roles_table.delete()) connection.execute(subjects_table.delete()) connection.execute(events_table.delete()) connection.execute(event_types_table.delete()) connection.execute(subject_types_table.delete()) connection.execute(role_types_table.delete()) delete_event_warehouse_data() with event_wh_sql_engine.begin()", "\"event_type\": \"no_callbacks\", \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [], \"metadata\": {}, }, \"lims\": \"LH_TEST\",", "cherrytrack_mock_source_plates_status(): return HTTPStatus.OK @pytest.fixture def cherrytrack_mock_run_info_status(): return HTTPStatus.OK @pytest.fixture def cherrytrack_mock_destination_plate_status(): return HTTPStatus.OK", "print(\"Inserting MLWH test data\") connection.execute(table.insert(), data) def delete_from_mlwh(app, mlwh_sql_engine, table_name): table = get_table(mlwh_sql_engine,", "MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) yield finally: delete_data() @pytest.fixture def mlwh_sentinel_and_beckman_cherrypicked(app, mlwh_sql_engine): def delete_data():", "\"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [ { \"role_type\": \"sample\", \"subject_type\": \"sample\", \"friendly_name\": \"friendly_name\", \"uuid\":", ") insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"stock_resource\"], mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"], ) yield finally: 
delete_data() @pytest.fixture def mlwh_beckman_cherrypicked(app,", "EVENT_WH_DATA from tests.fixtures.data.mlwh import ( COG_UK_IDS, MLWH_LH_SAMPLES, MLWH_LH_SAMPLES_MULTIPLE, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE, MLWH_SAMPLE_STOCK_RESOURCE, SAMPLES_FOR_MLWH_UPDATE, cherrytrack_mlwh_example, )", "insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"stock_resource\"], mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"], ) yield finally: delete_data() @pytest.fixture def mlwh_beckman_cherrypicked(app, mlwh_sql_engine):", "biosero_auth_headers(app): with app.app_context(): return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"biosero_read_write\")} @pytest.fixture def lighthouse_ui_auth_headers(app): with app.app_context(): return {\"Authorization\":", "FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER: \"biosero\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME: \"CPA\", } } @pytest.fixture def cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id): return build_cherrytrack_destination_plate_response(destination_barcode,", "app.app_context(): source_plates_collection = app.data.driver.db.source_plates _ = source_plates_collection.insert_many(SOURCE_PLATES) # yield a copy of that", "app, MLWH_SAMPLE_STOCK_RESOURCE[\"stock_resource\"], mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"], ) yield finally: delete_data() def insert_into_mlwh(app, data, mlwh_sql_engine, table_name):", "status=cherrytrack_mock_destination_plate_status, ) yield @pytest.fixture def cherrytrack_run_info_response(run_id): return { \"data\": { \"id\": run_id, FIELD_CHERRYTRACK_USER_ID:", "import copy import os from http import HTTPStatus from unittest.mock import MagicMock, patch", "test data\") connection.execute(table.delete()) @pytest.fixture def event_wh_data(app, event_wh_sql_engine): try: subjects_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECTS_TABLE\"]) roles_table", "mocked_responses, source_barcode, 
destination_barcode, cherrytrack_source_plates_response, cherrytrack_mock_source_plates_status, ): source_plates_url = f\"{app.config['CHERRYTRACK_URL']}/source-plates/{source_barcode}\" mocked_responses.add( responses.GET, source_plates_url, json=cherrytrack_source_plates_response,", "\"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": \"no_callbacks\", \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [], \"metadata\": {}, },", "copy.deepcopy(samples), inserted_samples # clear up after the fixture is used finally: samples_collection.delete_many({}) @pytest.fixture", "change it however it wants yield copy.deepcopy(SAMPLES), inserted_samples # clear up after the", "@pytest.fixture def samples_for_mlwh_update(): return SAMPLES_FOR_MLWH_UPDATE @pytest.fixture def cog_uk_ids(): return COG_UK_IDS # ********************** WAREHOUSE", "( FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER, FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME, FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER, FIELD_CHERRYTRACK_USER_ID, FIELD_SAMPLE_ID, ) from lighthouse.db.dart import load_sql_server_script from lighthouse.helpers.dart", "def biosero_auth_headers(app): with app.app_context(): return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"biosero_read_write\")} @pytest.fixture def lighthouse_ui_auth_headers(app): with app.app_context(): return", "app.config[\"EVENT_WH_SUBJECT_TYPES_TABLE\"]) role_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLE_TYPES_TABLE\"]) def delete_event_warehouse_data(): with event_wh_sql_engine.begin() as connection: connection.execute(roles_table.delete()) connection.execute(subjects_table.delete())", "DART_MONGO_MERGED_SAMPLES @pytest.fixture def event_wh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"EVENTS_WH_DB\"]) @pytest.fixture def message_unknown(): message_content: EventMessage =", "return 
Message(message_content) @pytest.fixture def plates_lookup_with_samples(samples, priority_samples): return PLATES_LOOKUP_WITH_SAMPLES @pytest.fixture def plates_lookup_without_samples(samples, priority_samples): return", "HTTPStatus.OK @pytest.fixture def baracoda_mock_status(): return HTTPStatus.CREATED @pytest.fixture def cherrytrack_mock_source_plates( app, mocked_responses, source_barcode, destination_barcode,", "mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"stock_resource\"], mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"], ) yield finally: delete_data() def", "after the fixture is used with app.app_context(): priority_samples_collection.delete_many({}) @pytest.fixture def source_plates(app): with app.app_context():", "responses.POST, baracoda_url, json=baracoda_mock_responses[centre_prefix], status=baracoda_mock_status, ) yield @pytest.fixture def cherrytrack_mock_source_plates_status(): return HTTPStatus.OK @pytest.fixture def", "cherrytrack_mlwh_example(source_barcode) # inserts insert_into_mlwh( app, example[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, example[\"sample\"], mlwh_sql_engine,", "\"00000000-1111-2222-3333-555555555555\", }, { \"role_type\": \"cherrypicking_source_labware\", \"subject_type\": \"plate\", \"friendly_name\": \"plate-barcode\", \"uuid\": \"00000000-1111-2222-3333-555555555556\", }, {", "delete_event_warehouse_data() with event_wh_sql_engine.begin() as connection: print(\"Inserting Events Warehouse test data\") connection.execute(role_types_table.insert(), EVENT_WH_DATA[\"role_types\"]) connection.execute(event_types_table.insert(),", "= get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECT_TYPES_TABLE\"]) role_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLE_TYPES_TABLE\"]) def delete_event_warehouse_data(): with event_wh_sql_engine.begin() as connection:", "= 
get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENTS_TABLE\"]) event_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENT_TYPES_TABLE\"]) subject_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECT_TYPES_TABLE\"]) role_types_table =", "def cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode): return build_cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode) @pytest.fixture def samples_from_cherrytrack_into_mongo(app, source_barcode): try:", "PRIORITY_SAMPLES needs to be <= SAMPLES for count, priority_sample in enumerate(priority_samples): priority_sample[FIELD_SAMPLE_ID] =", "with app.app_context(): return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"lighthouse_ui_read_write\")} @pytest.fixture def centres(app): with app.app_context(): centres_collection = app.data.driver.db.centres", "app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) try: delete_data() # inserts insert_into_mlwh( app,", "connection.execute(table.delete()) # delete all rows from table first print(\"Inserting MLWH test data\") connection.execute(table.insert(),", "as connection: connection.execute(roles_table.delete()) connection.execute(subjects_table.delete()) connection.execute(events_table.delete()) connection.execute(event_types_table.delete()) connection.execute(subject_types_table.delete()) connection.execute(role_types_table.delete()) delete_event_warehouse_data() with event_wh_sql_engine.begin() as connection:", "{ \"role_type\": \"sample\", \"subject_type\": \"sample\", \"friendly_name\": \"friendly_name\", \"uuid\": \"00000000-1111-2222-3333-555555555555\", }, { \"role_type\": \"cherrypicking_source_labware\",", "def cherrytrack_destination_plate_response_duplicated_wells(cherrytrack_destination_plate_response): 
cherrytrack_destination_plate_response[\"wells\"][0][\"destination_coordinate\"] = \"H12\" return cherrytrack_destination_plate_response @pytest.fixture def cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode): return", "} @pytest.fixture def cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id): return build_cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id) def cherrytrack_destination_plate_response_duplicated_wells(cherrytrack_destination_plate_response): cherrytrack_destination_plate_response[\"wells\"][0][\"destination_coordinate\"]", "that the test change it however it wants yield copy.deepcopy(SAMPLES), inserted_samples # clear", "the _id of the samples inserted into mongo, currently only uses the number", "def source_plates(app): with app.app_context(): source_plates_collection = app.data.driver.db.source_plates _ = source_plates_collection.insert_many(SOURCE_PLATES) # yield a", "destination_barcode): return build_cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode) @pytest.fixture def samples_from_cherrytrack_into_mongo(app, source_barcode): try: samples = rows_for_samples_in_cherrytrack(source_barcode)", "app.config[\"MLWH_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"study\"], mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"stock_resource\"], mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"],", "samples = rows_for_samples_in_cherrytrack(source_barcode) with app.app_context(): samples_collection = app.data.driver.db.samples inserted_samples = samples_collection.insert_many(samples) # yield", "of so that the test change it however it wants yield copy.deepcopy(PLATE_EVENTS), inserted_events", "= app.data.driver.db.priority_samples _ = priority_samples_collection.insert_many(priority_samples) yield priority_samples # clear up after the fixture", "{}, }, 
\"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture def plates_lookup_with_samples(samples, priority_samples): return PLATES_LOOKUP_WITH_SAMPLES", "= app.data.driver.db.samples inserted_samples = samples_collection.insert_many(samples) # yield a copy of so that the", "PE_BECKMAN_SOURCE_COMPLETED from lighthouse.constants.fields import ( FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER, FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME, FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER, FIELD_CHERRYTRACK_USER_ID, FIELD_SAMPLE_ID, ) from lighthouse.db.dart", "@pytest.fixture def cog_uk_ids(): return COG_UK_IDS # ********************** WAREHOUSE DATA ************************** # @pytest.fixture def", "mlwh_lh_samples_multiple(app, mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES_MULTIPLE, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def mlwh_sentinel_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app,", "print(\"Deleting MLWH test data\") connection.execute(table.delete()) @pytest.fixture def event_wh_data(app, event_wh_sql_engine): try: subjects_table = get_table(event_wh_sql_engine,", "import PE_BECKMAN_SOURCE_ALL_NEGATIVES, PE_BECKMAN_SOURCE_COMPLETED from lighthouse.constants.fields import ( FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER, FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME, FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER, FIELD_CHERRYTRACK_USER_ID, FIELD_SAMPLE_ID, )", "yield copy.deepcopy(SOURCE_PLATES) # clear up after the fixture is used with app.app_context(): source_plates_collection.delete_many({})", "@pytest.fixture def dart_connection(app): return create_dart_connection() @pytest.fixture def dart_schema_create(app): with app.app_context(): load_sql_server_script(\"tests/data/dart/schema.sql\") @pytest.fixture def", "subject_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECT_TYPES_TABLE\"]) role_types_table = get_table(event_wh_sql_engine, 
app.config[\"EVENT_WH_ROLE_TYPES_TABLE\"]) def delete_event_warehouse_data(): with event_wh_sql_engine.begin() as", "from lighthouse import create_app from lighthouse.constants.events import PE_BECKMAN_SOURCE_ALL_NEGATIVES, PE_BECKMAN_SOURCE_COMPLETED from lighthouse.constants.fields import (", "from lighthouse.constants.events import PE_BECKMAN_SOURCE_ALL_NEGATIVES, PE_BECKMAN_SOURCE_COMPLETED from lighthouse.constants.fields import ( FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER, FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME, FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER, FIELD_CHERRYTRACK_USER_ID,", "import CENTRES from tests.fixtures.data.dart import DART_MONGO_MERGED_SAMPLES from tests.fixtures.data.event_wh import EVENT_WH_DATA from tests.fixtures.data.mlwh import", "return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"biosero_read_write\")} @pytest.fixture def lighthouse_ui_auth_headers(app): with app.app_context(): return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"lighthouse_ui_read_write\")} @pytest.fixture def", "connection.execute(table.insert(), data) def delete_from_mlwh(app, mlwh_sql_engine, table_name): table = get_table(mlwh_sql_engine, table_name) with mlwh_sql_engine.begin() as", "app.config[\"MLWH_DB\"]) @pytest.fixture def dart_connection(app): return create_dart_connection() @pytest.fixture def dart_schema_create(app): with app.app_context(): load_sql_server_script(\"tests/data/dart/schema.sql\") @pytest.fixture", "PLATES_LOOKUP_WITH_SAMPLES @pytest.fixture def plates_lookup_without_samples(samples, priority_samples): return PLATES_LOOKUP_WITHOUT_SAMPLES @pytest.fixture def mocked_rabbit_channel(app): with app.app_context(): mocked_broker", "SOURCE_PLATES @pytest.fixture def app(): # set the 'EVE_SETTINGS' env variable to easily switch", "run_url = f\"{app.config['CHERRYTRACK_URL']}/automation-system-runs/{run_id}\" mocked_responses.add( responses.GET, run_url, json=cherrytrack_run_info_response, 
status=cherrytrack_mock_run_info_status, ) yield @pytest.fixture def baracoda_mock_barcodes_group(app,", "priority_samples_collection.delete_many({}) @pytest.fixture def source_plates(app): with app.app_context(): source_plates_collection = app.data.driver.db.source_plates _ = source_plates_collection.insert_many(SOURCE_PLATES) #", "app.data.driver.db.events events_collection.delete_many({}) @pytest.fixture def priority_samples(app, samples): _, samples = samples # create a", "\"subject_type\": \"robot\", \"friendly_name\": \"robot-serial\", \"uuid\": \"00000000-1111-2222-3333-555555555557\", }, ], \"metadata\": {}, }, \"lims\": \"LH_TEST\",", "fixture is used with app.app_context(): source_plates_collection.delete_many({}) @pytest.fixture def plate_events(app): with app.app_context(): events_collection =", "app.app_context(): load_sql_server_script(\"tests/data/dart/seed.sql\") @pytest.fixture def dart_mongo_merged_samples(): return DART_MONGO_MERGED_SAMPLES @pytest.fixture def event_wh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"EVENTS_WH_DB\"])", "\"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_ALL_NEGATIVES, \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [ {", "SAMPLES for count, priority_sample in enumerate(priority_samples): priority_sample[FIELD_SAMPLE_ID] = samples.inserted_ids[count] with app.app_context(): priority_samples_collection =", "mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() example =", "import SOURCE_PLATES @pytest.fixture def app(): # set the 'EVE_SETTINGS' env variable to easily", "from lighthouse.db.dart import load_sql_server_script from lighthouse.helpers.dart import create_dart_connection from lighthouse.helpers.mysql 
import create_mysql_connection_engine, get_table", "copy.deepcopy(PRIORITY_SAMPLES) # update the priority samples with the _id of the samples inserted", "HTTP calls. https://github.com/getsentry/responses#responses-as-a-pytest-fixture\"\"\" with responses.RequestsMock() as rsps: yield rsps @pytest.fixture def labwhere_samples_simple(app, mocked_responses):", "\"role_type\": \"cherrypicking_source_labware\", \"subject_type\": \"plate\", \"friendly_name\": \"plate-barcode\", \"uuid\": \"00000000-1111-2222-3333-555555555556\", }, { \"role_type\": \"robot\", \"subject_type\":", "= f\"{app.config['CHERRYTRACK_URL']}/source-plates/{source_barcode}\" mocked_responses.add( responses.GET, source_plates_url, json=cherrytrack_source_plates_response, status=cherrytrack_mock_source_plates_status, ) yield @pytest.fixture def cherrytrack_mock_destination_plate( app,", "_ = centres_collection.insert_many(CENTRES) # yield a copy so that the test change it", "= get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENT_TYPES_TABLE\"]) subject_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECT_TYPES_TABLE\"]) role_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLE_TYPES_TABLE\"]) def delete_event_warehouse_data():", "yield finally: delete_data() def insert_into_mlwh(app, data, mlwh_sql_engine, table_name): table = get_table(mlwh_sql_engine, table_name) with", "cherrytrack_mock_run_info_status(): return HTTPStatus.OK @pytest.fixture def cherrytrack_mock_destination_plate_status(): return HTTPStatus.OK @pytest.fixture def baracoda_mock_status(): return HTTPStatus.CREATED", "app.config.get(\"API_TOKENS_EVENTS\").get(\"lighthouse_ui_read_write\")} @pytest.fixture def centres(app): with app.app_context(): centres_collection = app.data.driver.db.centres _ = centres_collection.insert_many(CENTRES) #", "event_wh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"EVENTS_WH_DB\"]) 
@pytest.fixture def message_unknown(): message_content: EventMessage = { \"event\": {", "def dart_schema_create(app): with app.app_context(): load_sql_server_script(\"tests/data/dart/schema.sql\") @pytest.fixture def dart_samples(app, dart_schema_create): with app.app_context(): load_sql_server_script(\"tests/data/dart/seed.sql\") @pytest.fixture", "\"friendly_name\": \"robot-serial\", \"uuid\": \"00000000-1111-2222-3333-555555555557\", }, ], \"metadata\": {}, }, \"lims\": \"LH_TEST\", } return", "}, ], \"metadata\": {}, }, \"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture def plates_lookup_with_samples(samples,", "cherrytrack_destination_plate_response[\"wells\"][0][\"destination_coordinate\"] = \"H12\" return cherrytrack_destination_plate_response @pytest.fixture def cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode): return build_cherrytrack_source_plates_response(run_id, source_barcode,", "with app.app_context(): mocked_broker = MagicMock() with patch(\"lighthouse.classes.services.warehouse.Broker\", return_value=mocked_broker): mocked_channel = MagicMock() mocked_broker.__enter__.return_value =", "so that the test change it however it wants yield copy.deepcopy(SAMPLES), inserted_samples #", "app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) try: delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"],", "def samples_from_cherrytrack_into_mongo(app, source_barcode): try: samples = rows_for_samples_in_cherrytrack(source_barcode) with app.app_context(): samples_collection = app.data.driver.db.samples inserted_samples", "table first print(\"Inserting MLWH test data\") connection.execute(table.insert(), data) def delete_from_mlwh(app, mlwh_sql_engine, table_name): table", "], \"metadata\": {}, }, \"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture def 
plates_lookup_with_samples(samples, priority_samples):", "after the fixture is used with app.app_context(): events_collection.delete_many({}) @pytest.fixture def mocked_responses(): \"\"\"Easily mock", "build_cherrytrack_destination_plate_response from tests.fixtures.data.biosero.source_plate_wells import build_cherrytrack_source_plates_response from tests.fixtures.data.centres import CENTRES from tests.fixtures.data.dart import DART_MONGO_MERGED_SAMPLES", "\"test1\", \"subjects\": [ { \"role_type\": \"cherrypicking_source_labware\", \"subject_type\": \"plate\", \"friendly_name\": \"plate-barcode\", \"uuid\": \"00000000-1111-2222-3333-555555555556\", },", "try: subjects_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECTS_TABLE\"]) roles_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLES_TABLE\"]) events_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENTS_TABLE\"])", "_ = source_plates_collection.insert_many(SOURCE_PLATES) # yield a copy of that the test change it", "samples.inserted_ids[count] with app.app_context(): priority_samples_collection = app.data.driver.db.priority_samples _ = priority_samples_collection.insert_many(priority_samples) yield priority_samples # clear", "cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode): return build_cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode) @pytest.fixture def samples_from_cherrytrack_into_mongo(app, source_barcode): try: samples", "up after the fixture is used with app.app_context(): source_plates_collection.delete_many({}) @pytest.fixture def plate_events(app): with", "app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() example = cherrytrack_mlwh_example(source_barcode) # inserts insert_into_mlwh( app,", "} } @pytest.fixture def cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id): return 
build_cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id) def cherrytrack_destination_plate_response_duplicated_wells(cherrytrack_destination_plate_response):", "app = create_app() yield app @pytest.fixture def client(app): return app.test_client() @pytest.fixture def biosero_auth_headers(app):", "samples with the _id of the samples inserted into mongo, currently only uses", "FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER: \"aLiquidHandlerSerialNumber\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER: \"biosero\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME: \"CPA\", } } @pytest.fixture def cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id):", "finally: delete_event_warehouse_data() @pytest.fixture def mlwh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"MLWH_DB\"]) @pytest.fixture def dart_connection(app): return create_dart_connection()", "from unittest.mock import MagicMock, patch import pytest import responses from lighthouse import create_app", "priority_samples = copy.deepcopy(PRIORITY_SAMPLES) # update the priority samples with the _id of the", "load_sql_server_script(\"tests/data/dart/seed.sql\") @pytest.fixture def dart_mongo_merged_samples(): return DART_MONGO_MERGED_SAMPLES @pytest.fixture def event_wh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"EVENTS_WH_DB\"]) @pytest.fixture", "get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECTS_TABLE\"]) roles_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLES_TABLE\"]) events_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENTS_TABLE\"]) event_types_table = get_table(event_wh_sql_engine,", "FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER, FIELD_CHERRYTRACK_USER_ID, FIELD_SAMPLE_ID, ) from lighthouse.db.dart import load_sql_server_script from lighthouse.helpers.dart import 
create_dart_connection from", "MLWH_SAMPLE_STOCK_RESOURCE[\"stock_resource\"], mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"], ) yield finally: delete_data() @pytest.fixture def mlwh_beckman_cherrypicked(app, mlwh_sql_engine): def delete_data():", "app.config[\"EVENT_WH_SUBJECTS_TABLE\"]) roles_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLES_TABLE\"]) events_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENTS_TABLE\"]) event_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENT_TYPES_TABLE\"])", "def event_wh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"EVENTS_WH_DB\"]) @pytest.fixture def message_unknown(): message_content: EventMessage = { \"event\":", "priority_samples): return PLATES_LOOKUP_WITH_SAMPLES @pytest.fixture def plates_lookup_without_samples(samples, priority_samples): return PLATES_LOOKUP_WITHOUT_SAMPLES @pytest.fixture def mocked_rabbit_channel(app): with", "delete_event_warehouse_data() @pytest.fixture def mlwh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"MLWH_DB\"]) @pytest.fixture def dart_connection(app): return create_dart_connection() @pytest.fixture", "copy.deepcopy(SAMPLES), inserted_samples # clear up after the fixture is used with app.app_context(): samples_collection.delete_many({})", "\"subjects\": [], \"metadata\": {}, }, \"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture def message_source_complete():", "source_plates(app): with app.app_context(): source_plates_collection = app.data.driver.db.source_plates _ = source_plates_collection.insert_many(SOURCE_PLATES) # yield a copy", "app, mocked_responses, run_id, cherrytrack_run_info_response, cherrytrack_mock_run_info_status ): run_url = f\"{app.config['CHERRYTRACK_URL']}/automation-system-runs/{run_id}\" mocked_responses.add( responses.GET, run_url, 
json=cherrytrack_run_info_response,", "destination_plate_url = f\"{app.config['CHERRYTRACK_URL']}/destination-plates/{destination_barcode}\" mocked_responses.add( responses.GET, destination_plate_url, json=cherrytrack_destination_plate_response, status=cherrytrack_mock_destination_plate_status, ) yield @pytest.fixture def cherrytrack_run_info_response(run_id):", "import pytest import responses from lighthouse import create_app from lighthouse.constants.events import PE_BECKMAN_SOURCE_ALL_NEGATIVES, PE_BECKMAN_SOURCE_COMPLETED", "\"LH_TEST\", } return Message(message_content) @pytest.fixture def message_source_complete(): message_content: EventMessage = { \"event\": {", "tests.fixtures.data.biosero.source_plate_wells import build_cherrytrack_source_plates_response from tests.fixtures.data.centres import CENTRES from tests.fixtures.data.dart import DART_MONGO_MERGED_SAMPLES from tests.fixtures.data.event_wh", "app os.environ[\"EVE_SETTINGS\"] = \"test.py\" app = create_app() yield app @pytest.fixture def client(app): return", "# yield a copy so that the test change it however it wants", "app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) yield finally: delete_data() @pytest.fixture def", "cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id): return build_cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id) def cherrytrack_destination_plate_response_duplicated_wells(cherrytrack_destination_plate_response): cherrytrack_destination_plate_response[\"wells\"][0][\"destination_coordinate\"] = \"H12\" return", "mlwh_samples_in_cherrytrack(app, source_barcode, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data()", "table = 
get_table(mlwh_sql_engine, table_name) with mlwh_sql_engine.begin() as connection: print(\"Deleting MLWH test data\") connection.execute(table.delete())", "status=cherrytrack_mock_run_info_status, ) yield @pytest.fixture def baracoda_mock_barcodes_group(app, mocked_responses, baracoda_mock_responses, baracoda_mock_status): for centre_prefix in baracoda_mock_responses.keys():", "app.config[\"MLWH_STOCK_RESOURCES_TABLE\"], ) yield finally: delete_data() def insert_into_mlwh(app, data, mlwh_sql_engine, table_name): table = get_table(mlwh_sql_engine,", "centres_collection.insert_many(CENTRES) # yield a copy so that the test change it however it", "return SAMPLES_FOR_MLWH_UPDATE @pytest.fixture def cog_uk_ids(): return COG_UK_IDS # ********************** WAREHOUSE DATA ************************** #", "centres_collection = app.data.driver.db.centres _ = centres_collection.insert_many(CENTRES) # yield a copy so that the", "mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) try: delete_data() # inserts insert_into_mlwh(", "mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine,", "\"uuid\": \"00000000-1111-2222-3333-555555555556\", }, { \"role_type\": \"robot\", \"subject_type\": \"robot\", \"friendly_name\": \"robot-serial\", \"uuid\": \"00000000-1111-2222-3333-555555555557\", },", "app, MLWH_SAMPLE_STOCK_RESOURCE[\"study\"], mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"stock_resource\"], mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"], ) yield finally:", "\"sample\", \"subject_type\": \"sample\", \"friendly_name\": \"friendly_name\", \"uuid\": 
\"00000000-1111-2222-3333-555555555555\", }, { \"role_type\": \"cherrypicking_source_labware\", \"subject_type\": \"plate\",", "\"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_ALL_NEGATIVES, \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [ { \"role_type\": \"cherrypicking_source_labware\",", "a copy of that the test change it however it wants yield copy.deepcopy(SOURCE_PLATES)", "the test change it however it wants yield copy.deepcopy(CENTRES) # clear up after", "the test change it however it wants yield copy.deepcopy(samples), inserted_samples # clear up", "from lighthouse.helpers.mysql import create_mysql_connection_engine, get_table from lighthouse.messages.message import Message from lighthouse.types import EventMessage", "responses from lighthouse import create_app from lighthouse.constants.events import PE_BECKMAN_SOURCE_ALL_NEGATIVES, PE_BECKMAN_SOURCE_COMPLETED from lighthouse.constants.fields import", "with app.app_context(): load_sql_server_script(\"tests/data/dart/seed.sql\") @pytest.fixture def dart_mongo_merged_samples(): return DART_MONGO_MERGED_SAMPLES @pytest.fixture def event_wh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"],", "= \"H12\" return cherrytrack_destination_plate_response @pytest.fixture def cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode): return build_cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode)", "app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"],", "baracoda_mock_status(): return HTTPStatus.CREATED @pytest.fixture def cherrytrack_mock_source_plates( app, mocked_responses, source_barcode, destination_barcode, 
cherrytrack_source_plates_response, cherrytrack_mock_source_plates_status, ):", "{ \"data\": { \"id\": run_id, FIELD_CHERRYTRACK_USER_ID: \"user1\", FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER: \"aLiquidHandlerSerialNumber\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER: \"biosero\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME: \"CPA\",", "= cherrytrack_mlwh_example(source_barcode) # inserts insert_into_mlwh( app, example[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, example[\"sample\"],", "example = cherrytrack_mlwh_example(source_barcode) # inserts insert_into_mlwh( app, example[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app,", "= copy.deepcopy(PRIORITY_SAMPLES) # update the priority samples with the _id of the samples", "EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_ALL_NEGATIVES, \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\",", "\"id\": run_id, FIELD_CHERRYTRACK_USER_ID: \"user1\", FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER: \"aLiquidHandlerSerialNumber\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER: \"biosero\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME: \"CPA\", } } @pytest.fixture", "{ \"id\": run_id, FIELD_CHERRYTRACK_USER_ID: \"user1\", FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER: \"aLiquidHandlerSerialNumber\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER: \"biosero\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME: \"CPA\", } }", "return create_dart_connection() @pytest.fixture def dart_schema_create(app): with app.app_context(): load_sql_server_script(\"tests/data/dart/schema.sql\") @pytest.fixture def dart_samples(app, dart_schema_create): with", "FIELD_CHERRYTRACK_USER_ID, FIELD_SAMPLE_ID, ) from lighthouse.db.dart import load_sql_server_script from lighthouse.helpers.dart import create_dart_connection from 
lighthouse.helpers.mysql", "def baracoda_mock_status(): return HTTPStatus.CREATED @pytest.fixture def cherrytrack_mock_source_plates( app, mocked_responses, source_barcode, destination_barcode, cherrytrack_source_plates_response, cherrytrack_mock_source_plates_status,", "yield a copy so that the test change it however it wants yield", "return_value=mocked_broker): mocked_channel = MagicMock() mocked_broker.__enter__.return_value = mocked_channel yield mocked_channel @pytest.fixture def cherrytrack_mock_run_info( app,", "MLWH_LH_SAMPLES_MULTIPLE, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def mlwh_sentinel_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app,", "MLWH_LH_SAMPLES, MLWH_LH_SAMPLES_MULTIPLE, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE, MLWH_SAMPLE_STOCK_RESOURCE, SAMPLES_FOR_MLWH_UPDATE, cherrytrack_mlwh_example, ) from tests.fixtures.data.plate_events import PLATE_EVENTS from tests.fixtures.data.plates_lookup", "with app.app_context(): centres_collection = app.data.driver.db.centres _ = centres_collection.insert_many(CENTRES) # yield a copy so", "yield mocked_channel @pytest.fixture def cherrytrack_mock_run_info( app, mocked_responses, run_id, cherrytrack_run_info_response, cherrytrack_mock_run_info_status ): run_url =", "the fixture is used with app.app_context(): events_collection.delete_many({}) @pytest.fixture def mocked_responses(): \"\"\"Easily mock responses", "mocked_channel yield mocked_channel @pytest.fixture def cherrytrack_mock_run_info( app, mocked_responses, run_id, cherrytrack_run_info_response, cherrytrack_mock_run_info_status ): run_url", "def samples_for_mlwh_update(): return SAMPLES_FOR_MLWH_UPDATE @pytest.fixture def cog_uk_ids(): return COG_UK_IDS # ********************** WAREHOUSE DATA", "EventMessage from tests.fixtures.data.biosero.destination_plate_wells import 
build_cherrytrack_destination_plate_response from tests.fixtures.data.biosero.source_plate_wells import build_cherrytrack_source_plates_response from tests.fixtures.data.centres import CENTRES", "connection.execute(table.delete()) @pytest.fixture def event_wh_data(app, event_wh_sql_engine): try: subjects_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECTS_TABLE\"]) roles_table = get_table(event_wh_sql_engine,", "def dart_connection(app): return create_dart_connection() @pytest.fixture def dart_schema_create(app): with app.app_context(): load_sql_server_script(\"tests/data/dart/schema.sql\") @pytest.fixture def dart_samples(app,", "yield finally: with app.app_context(): events_collection = app.data.driver.db.events events_collection.delete_many({}) @pytest.fixture def priority_samples(app, samples): _,", "return HTTPStatus.OK @pytest.fixture def cherrytrack_mock_run_info_status(): return HTTPStatus.OK @pytest.fixture def cherrytrack_mock_destination_plate_status(): return HTTPStatus.OK @pytest.fixture", "dart_connection(app): return create_dart_connection() @pytest.fixture def dart_schema_create(app): with app.app_context(): load_sql_server_script(\"tests/data/dart/schema.sql\") @pytest.fixture def dart_samples(app, dart_schema_create):", ") yield @pytest.fixture def cherrytrack_mock_destination_plate( app, mocked_responses, destination_barcode, cherrytrack_destination_plate_response, cherrytrack_mock_destination_plate_status, ): destination_plate_url =", "FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME, FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER, FIELD_CHERRYTRACK_USER_ID, FIELD_SAMPLE_ID, ) from lighthouse.db.dart import load_sql_server_script from lighthouse.helpers.dart import create_dart_connection", "with app.app_context(): events_collection = app.data.driver.db.events events_collection.delete_many({}) @pytest.fixture def priority_samples(app, samples): _, samples =", "delete_data() @pytest.fixture def mlwh_beckman_cherrypicked(app, 
mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"])", "run_id): return build_cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id) def cherrytrack_destination_plate_response_duplicated_wells(cherrytrack_destination_plate_response): cherrytrack_destination_plate_response[\"wells\"][0][\"destination_coordinate\"] = \"H12\" return cherrytrack_destination_plate_response @pytest.fixture", "with app.app_context(): events_collection = app.data.driver.db.events inserted_events = events_collection.insert_many(PLATE_EVENTS) # yield a copy of", "clear up after the fixture is used with app.app_context(): priority_samples_collection.delete_many({}) @pytest.fixture def source_plates(app):", "}, { \"role_type\": \"cherrypicking_source_labware\", \"subject_type\": \"plate\", \"friendly_name\": \"plate-barcode\", \"uuid\": \"00000000-1111-2222-3333-555555555556\", }, { \"role_type\":", "connection.execute(event_types_table.insert(), EVENT_WH_DATA[\"event_types\"]) connection.execute(subject_types_table.insert(), EVENT_WH_DATA[\"subject_types\"]) connection.execute(subjects_table.insert(), EVENT_WH_DATA[\"subjects\"]) connection.execute(events_table.insert(), EVENT_WH_DATA[\"events\"]) connection.execute(roles_table.insert(), EVENT_WH_DATA[\"roles\"]) yield finally: delete_event_warehouse_data()", "mocked_responses): labwhere_url = f\"{app.config['LABWHERE_URL']}/api/labwares_by_barcode\" body = [ { \"barcode\": \"plate_123\", \"location_barcode\": \"location_123\", }", "app.config[\"EVENTS_WH_DB\"]) @pytest.fixture def message_unknown(): message_content: EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\":", "destination_barcode, cherrytrack_source_plates_response, cherrytrack_mock_source_plates_status, ): source_plates_url = 
f\"{app.config['CHERRYTRACK_URL']}/source-plates/{source_barcode}\" mocked_responses.add( responses.GET, source_plates_url, json=cherrytrack_source_plates_response, status=cherrytrack_mock_source_plates_status, )", "}, \"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture def message_source_all_negative(): message_content: EventMessage = {", "\"event_type\": PE_BECKMAN_SOURCE_ALL_NEGATIVES, \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [ { \"role_type\": \"cherrypicking_source_labware\", \"subject_type\": \"plate\",", "EVENT_WH_DATA[\"events\"]) connection.execute(roles_table.insert(), EVENT_WH_DATA[\"roles\"]) yield finally: delete_event_warehouse_data() @pytest.fixture def mlwh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"MLWH_DB\"]) @pytest.fixture", "is used with app.app_context(): priority_samples_collection.delete_many({}) @pytest.fixture def source_plates(app): with app.app_context(): source_plates_collection = app.data.driver.db.source_plates", "def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) try: delete_data()", "tests.fixtures.data.mlwh import ( COG_UK_IDS, MLWH_LH_SAMPLES, MLWH_LH_SAMPLES_MULTIPLE, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE, MLWH_SAMPLE_STOCK_RESOURCE, SAMPLES_FOR_MLWH_UPDATE, cherrytrack_mlwh_example, ) from tests.fixtures.data.plate_events", "MLWH_SAMPLE_STOCK_RESOURCE[\"stock_resource\"], mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"], ) yield finally: delete_data() def insert_into_mlwh(app, data, mlwh_sql_engine, table_name): table", "baracoda_mock_status): for centre_prefix in baracoda_mock_responses.keys(): if baracoda_mock_responses[centre_prefix] is not None: num_samples = 
len(baracoda_mock_responses[centre_prefix][\"barcodes_group\"][\"barcodes\"])", "app(): # set the 'EVE_SETTINGS' env variable to easily switch to the testing", "up after the fixture is used with app.app_context(): events_collection.delete_many({}) @pytest.fixture def mocked_responses(): \"\"\"Easily", "labwhere_url, json=body, status=HTTPStatus.OK) @pytest.fixture def samples_for_mlwh_update(): return SAMPLES_FOR_MLWH_UPDATE @pytest.fixture def cog_uk_ids(): return COG_UK_IDS", "], \"metadata\": {}, }, \"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture def message_source_all_negative(): message_content:", "cherrytrack_run_info_response(run_id): return { \"data\": { \"id\": run_id, FIELD_CHERRYTRACK_USER_ID: \"user1\", FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER: \"aLiquidHandlerSerialNumber\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER: \"biosero\",", "source_plates_url, json=cherrytrack_source_plates_response, status=cherrytrack_mock_source_plates_status, ) yield @pytest.fixture def cherrytrack_mock_destination_plate( app, mocked_responses, destination_barcode, cherrytrack_destination_plate_response, cherrytrack_mock_destination_plate_status,", "\"aLiquidHandlerSerialNumber\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER: \"biosero\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME: \"CPA\", } } @pytest.fixture def cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id): return", "however it wants yield copy.deepcopy(SAMPLES), inserted_samples # clear up after the fixture is", "mocked_responses.add( responses.POST, baracoda_url, json=baracoda_mock_responses[centre_prefix], status=baracoda_mock_status, ) yield @pytest.fixture def cherrytrack_mock_source_plates_status(): return HTTPStatus.OK @pytest.fixture", "with app.app_context(): samples_collection = app.data.driver.db.samples inserted_samples = samples_collection.insert_many(samples) # yield a copy of", "def delete_data(): delete_from_mlwh(app, 
mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() # inserts insert_into_mlwh(", "centres_collection.delete_many({}) @pytest.fixture def samples(app): with app.app_context(): samples_collection = app.data.driver.db.samples inserted_samples = samples_collection.insert_many(SAMPLES) #", "run_url, json=cherrytrack_run_info_response, status=cherrytrack_mock_run_info_status, ) yield @pytest.fixture def baracoda_mock_barcodes_group(app, mocked_responses, baracoda_mock_responses, baracoda_mock_status): for centre_prefix", "run_id) def cherrytrack_destination_plate_response_duplicated_wells(cherrytrack_destination_plate_response): cherrytrack_destination_plate_response[\"wells\"][0][\"destination_coordinate\"] = \"H12\" return cherrytrack_destination_plate_response @pytest.fixture def cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode):", "\"LH_TEST\", } return Message(message_content) @pytest.fixture def message_source_all_negative(): message_content: EventMessage = { \"event\": {", "try: yield finally: with app.app_context(): events_collection = app.data.driver.db.events events_collection.delete_many({}) @pytest.fixture def priority_samples(app, samples):", "app.test_client() @pytest.fixture def biosero_auth_headers(app): with app.app_context(): return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"biosero_read_write\")} @pytest.fixture def lighthouse_ui_auth_headers(app): with", "create a copy so that the test can change it however it needs", "MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"] + MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], # type: ignore mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"study\"], mlwh_sql_engine,", "app.config[\"MLWH_STUDY_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"stock_resource\"], mlwh_sql_engine, 
app.config[\"MLWH_STOCK_RESOURCES_TABLE\"], ) yield finally: delete_data() def insert_into_mlwh(app,", "connection.execute(subjects_table.delete()) connection.execute(events_table.delete()) connection.execute(event_types_table.delete()) connection.execute(subject_types_table.delete()) connection.execute(role_types_table.delete()) delete_event_warehouse_data() with event_wh_sql_engine.begin() as connection: print(\"Inserting Events Warehouse", ") insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"stock_resource\"], mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"], ) yield finally: delete_data() def insert_into_mlwh(app, data,", "with app.app_context(): load_sql_server_script(\"tests/data/dart/schema.sql\") @pytest.fixture def dart_samples(app, dart_schema_create): with app.app_context(): load_sql_server_script(\"tests/data/dart/seed.sql\") @pytest.fixture def dart_mongo_merged_samples():", "@pytest.fixture def dart_samples(app, dart_schema_create): with app.app_context(): load_sql_server_script(\"tests/data/dart/seed.sql\") @pytest.fixture def dart_mongo_merged_samples(): return DART_MONGO_MERGED_SAMPLES @pytest.fixture", "EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_COMPLETED, \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\",", "= rows_for_samples_in_cherrytrack(source_barcode) with app.app_context(): samples_collection = app.data.driver.db.samples inserted_samples = samples_collection.insert_many(samples) # yield a", "up after the fixture is used finally: samples_collection.delete_many({}) @pytest.fixture def mlwh_samples_in_cherrytrack(app, source_barcode, mlwh_sql_engine):", "def app(): # set the 'EVE_SETTINGS' env variable to easily switch to the", "rows from table first print(\"Inserting MLWH test data\") connection.execute(table.insert(), data) def delete_from_mlwh(app, mlwh_sql_engine,", "\"subjects\": [ { \"role_type\": \"sample\", \"subject_type\": 
\"sample\", \"friendly_name\": \"friendly_name\", \"uuid\": \"00000000-1111-2222-3333-555555555555\", }, {", "message_source_all_negative(): message_content: EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_ALL_NEGATIVES, \"occured_at\": \"2020-11-26T15:58:20\",", "event_wh_sql_engine): try: subjects_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECTS_TABLE\"]) roles_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLES_TABLE\"]) events_table = get_table(event_wh_sql_engine,", "\"test1\", \"subjects\": [ { \"role_type\": \"sample\", \"subject_type\": \"sample\", \"friendly_name\": \"friendly_name\", \"uuid\": \"00000000-1111-2222-3333-555555555555\", },", "): source_plates_url = f\"{app.config['CHERRYTRACK_URL']}/source-plates/{source_barcode}\" mocked_responses.add( responses.GET, source_plates_url, json=cherrytrack_source_plates_response, status=cherrytrack_mock_source_plates_status, ) yield @pytest.fixture def", "FIELD_SAMPLE_ID, ) from lighthouse.db.dart import load_sql_server_script from lighthouse.helpers.dart import create_dart_connection from lighthouse.helpers.mysql import", "data\") connection.execute(role_types_table.insert(), EVENT_WH_DATA[\"role_types\"]) connection.execute(event_types_table.insert(), EVENT_WH_DATA[\"event_types\"]) connection.execute(subject_types_table.insert(), EVENT_WH_DATA[\"subject_types\"]) connection.execute(subjects_table.insert(), EVENT_WH_DATA[\"subjects\"]) connection.execute(events_table.insert(), EVENT_WH_DATA[\"events\"]) connection.execute(roles_table.insert(), EVENT_WH_DATA[\"roles\"])", "for centre_prefix in baracoda_mock_responses.keys(): if baracoda_mock_responses[centre_prefix] is not None: num_samples = len(baracoda_mock_responses[centre_prefix][\"barcodes_group\"][\"barcodes\"]) baracoda_url", "source_barcode, destination_barcode) @pytest.fixture def samples_from_cherrytrack_into_mongo(app, source_barcode): try: 
samples = rows_for_samples_in_cherrytrack(source_barcode) with app.app_context(): samples_collection", "app.app_context(): priority_samples_collection.delete_many({}) @pytest.fixture def source_plates(app): with app.app_context(): source_plates_collection = app.data.driver.db.source_plates _ = source_plates_collection.insert_many(SOURCE_PLATES)", "@pytest.fixture def plate_events(app): with app.app_context(): events_collection = app.data.driver.db.events inserted_events = events_collection.insert_many(PLATE_EVENTS) # yield", "{ \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": \"no_callbacks\", \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [],", "rsps @pytest.fixture def labwhere_samples_simple(app, mocked_responses): labwhere_url = f\"{app.config['LABWHERE_URL']}/api/labwares_by_barcode\" body = [ { \"barcode\":", "connection.execute(roles_table.insert(), EVENT_WH_DATA[\"roles\"]) yield finally: delete_event_warehouse_data() @pytest.fixture def mlwh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"MLWH_DB\"]) @pytest.fixture def", "from tests.fixtures.data.event_wh import EVENT_WH_DATA from tests.fixtures.data.mlwh import ( COG_UK_IDS, MLWH_LH_SAMPLES, MLWH_LH_SAMPLES_MULTIPLE, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE, MLWH_SAMPLE_STOCK_RESOURCE,", "however it wants yield copy.deepcopy(PLATE_EVENTS), inserted_events # clear up after the fixture is", "with mlwh_sql_engine.begin() as connection: connection.execute(table.delete()) # delete all rows from table first print(\"Inserting", "after the fixture is used finally: samples_collection.delete_many({}) @pytest.fixture def mlwh_samples_in_cherrytrack(app, source_barcode, mlwh_sql_engine): def", "fixture is used with app.app_context(): priority_samples_collection.delete_many({}) @pytest.fixture def source_plates(app): with app.app_context(): source_plates_collection =", "change it however it 
wants yield copy.deepcopy(PLATE_EVENTS), inserted_events # clear up after the", "run_id, FIELD_CHERRYTRACK_USER_ID: \"user1\", FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER: \"aLiquidHandlerSerialNumber\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER: \"biosero\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME: \"CPA\", } } @pytest.fixture def", "with app.app_context(): priority_samples_collection = app.data.driver.db.priority_samples _ = priority_samples_collection.insert_many(priority_samples) yield priority_samples # clear up", "copy so that the test can change it however it needs priority_samples =", "\"00000000-1111-2222-3333-555555555557\", }, ], \"metadata\": {}, }, \"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture def", "= create_app() yield app @pytest.fixture def client(app): return app.test_client() @pytest.fixture def biosero_auth_headers(app): with", "# clear up after the fixture is used finally: samples_collection.delete_many({}) @pytest.fixture def mlwh_samples_in_cherrytrack(app,", "it however it wants yield copy.deepcopy(CENTRES) # clear up after the fixture is", "def mlwh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"MLWH_DB\"]) @pytest.fixture def dart_connection(app): return create_dart_connection() @pytest.fixture def dart_schema_create(app):", "patch import pytest import responses from lighthouse import create_app from lighthouse.constants.events import PE_BECKMAN_SOURCE_ALL_NEGATIVES,", "used with app.app_context(): events_collection.delete_many({}) @pytest.fixture def mocked_responses(): \"\"\"Easily mock responses from HTTP calls.", "\"user_identifier\": \"test1\", \"subjects\": [], \"metadata\": {}, }, \"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture", "is used with app.app_context(): samples_collection.delete_many({}) @pytest.fixture def clear_events(app): try: yield finally: with app.app_context():", "app, mocked_responses, 
source_barcode, destination_barcode, cherrytrack_source_plates_response, cherrytrack_mock_source_plates_status, ): source_plates_url = f\"{app.config['CHERRYTRACK_URL']}/source-plates/{source_barcode}\" mocked_responses.add( responses.GET, source_plates_url,", "\"H12\" return cherrytrack_destination_plate_response @pytest.fixture def cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode): return build_cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode) @pytest.fixture", "\"LH_TEST\", } return Message(message_content) @pytest.fixture def plates_lookup_with_samples(samples, priority_samples): return PLATES_LOOKUP_WITH_SAMPLES @pytest.fixture def plates_lookup_without_samples(samples,", "get_table(mlwh_sql_engine, table_name) with mlwh_sql_engine.begin() as connection: connection.execute(table.delete()) # delete all rows from table", "\"test.py\" app = create_app() yield app @pytest.fixture def client(app): return app.test_client() @pytest.fixture def", "priority_samples_collection = app.data.driver.db.priority_samples _ = priority_samples_collection.insert_many(priority_samples) yield priority_samples # clear up after the", "mlwh_sql_engine, table_name): table = get_table(mlwh_sql_engine, table_name) with mlwh_sql_engine.begin() as connection: connection.execute(table.delete()) # delete", "with app.app_context(): samples_collection = app.data.driver.db.samples inserted_samples = samples_collection.insert_many(SAMPLES) # yield a copy of", "it wants yield copy.deepcopy(samples), inserted_samples # clear up after the fixture is used", "import EventMessage from tests.fixtures.data.biosero.destination_plate_wells import build_cherrytrack_destination_plate_response from tests.fixtures.data.biosero.source_plate_wells import build_cherrytrack_source_plates_response from tests.fixtures.data.centres import", "# delete all rows from table first print(\"Inserting MLWH test data\") connection.execute(table.insert(), data)", 
"mlwh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"MLWH_DB\"]) @pytest.fixture def dart_connection(app): return create_dart_connection() @pytest.fixture def dart_schema_create(app): with", "source_plates_collection.delete_many({}) @pytest.fixture def plate_events(app): with app.app_context(): events_collection = app.data.driver.db.events inserted_events = events_collection.insert_many(PLATE_EVENTS) #", "mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data()", "= source_plates_collection.insert_many(SOURCE_PLATES) # yield a copy of that the test change it however", "# yield a copy of that the test change it however it wants", "def cherrytrack_mock_run_info( app, mocked_responses, run_id, cherrytrack_run_info_response, cherrytrack_mock_run_info_status ): run_url = f\"{app.config['CHERRYTRACK_URL']}/automation-system-runs/{run_id}\" mocked_responses.add( responses.GET,", "_, samples = samples # create a copy so that the test can", "delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"])", "connection.execute(event_types_table.delete()) connection.execute(subject_types_table.delete()) connection.execute(role_types_table.delete()) delete_event_warehouse_data() with event_wh_sql_engine.begin() as connection: print(\"Inserting Events Warehouse test data\")", "app.app_context(): return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"biosero_read_write\")} @pytest.fixture 
def lighthouse_ui_auth_headers(app): with app.app_context(): return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"lighthouse_ui_read_write\")} @pytest.fixture", "HTTPStatus.OK @pytest.fixture def cherrytrack_mock_destination_plate_status(): return HTTPStatus.OK @pytest.fixture def baracoda_mock_status(): return HTTPStatus.CREATED @pytest.fixture def", "a copy so that the test change it however it wants yield copy.deepcopy(CENTRES)", "mocked_channel = MagicMock() mocked_broker.__enter__.return_value = mocked_channel yield mocked_channel @pytest.fixture def cherrytrack_mock_run_info( app, mocked_responses,", "def insert_into_mlwh(app, data, mlwh_sql_engine, table_name): table = get_table(mlwh_sql_engine, table_name) with mlwh_sql_engine.begin() as connection:", "PLATE_EVENTS from tests.fixtures.data.plates_lookup import PLATES_LOOKUP_WITH_SAMPLES, PLATES_LOOKUP_WITHOUT_SAMPLES from tests.fixtures.data.priority_samples import PRIORITY_SAMPLES from tests.fixtures.data.samples import", "ignore mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"study\"], mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"stock_resource\"],", "@pytest.fixture def labwhere_samples_simple(app, mocked_responses): labwhere_url = f\"{app.config['LABWHERE_URL']}/api/labwares_by_barcode\" body = [ { \"barcode\": \"plate_123\",", "@pytest.fixture def cherrytrack_run_info_response(run_id): return { \"data\": { \"id\": run_id, FIELD_CHERRYTRACK_USER_ID: \"user1\", FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER: \"aLiquidHandlerSerialNumber\",", "+ MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], # type: ignore mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"study\"], mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"],", "@pytest.fixture def cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode): 
return build_cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode) @pytest.fixture def samples_from_cherrytrack_into_mongo(app, source_barcode):", "COG_UK_IDS, MLWH_LH_SAMPLES, MLWH_LH_SAMPLES_MULTIPLE, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE, MLWH_SAMPLE_STOCK_RESOURCE, SAMPLES_FOR_MLWH_UPDATE, cherrytrack_mlwh_example, ) from tests.fixtures.data.plate_events import PLATE_EVENTS from", "app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def mlwh_lh_samples_multiple(app, mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES_MULTIPLE, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def mlwh_sentinel_cherrypicked(app, mlwh_sql_engine):", "get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECT_TYPES_TABLE\"]) role_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLE_TYPES_TABLE\"]) def delete_event_warehouse_data(): with event_wh_sql_engine.begin() as connection: connection.execute(roles_table.delete())", "MagicMock, patch import pytest import responses from lighthouse import create_app from lighthouse.constants.events import", "None: num_samples = len(baracoda_mock_responses[centre_prefix][\"barcodes_group\"][\"barcodes\"]) baracoda_url = ( f\"http://{app.config['BARACODA_URL']}\" f\"/barcodes_group/{centre_prefix}/new?count={num_samples}\" ) mocked_responses.add( responses.POST, baracoda_url,", "@pytest.fixture def baracoda_mock_status(): return HTTPStatus.CREATED @pytest.fixture def cherrytrack_mock_source_plates( app, mocked_responses, source_barcode, destination_barcode, cherrytrack_source_plates_response,", "return HTTPStatus.CREATED @pytest.fixture def cherrytrack_mock_source_plates( app, mocked_responses, source_barcode, destination_barcode, cherrytrack_source_plates_response, cherrytrack_mock_source_plates_status, ): source_plates_url", "def baracoda_mock_barcodes_group(app, mocked_responses, baracoda_mock_responses, baracoda_mock_status): for centre_prefix in 
baracoda_mock_responses.keys(): if baracoda_mock_responses[centre_prefix] is not", "cherrytrack_destination_plate_response @pytest.fixture def cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode): return build_cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode) @pytest.fixture def samples_from_cherrytrack_into_mongo(app,", "delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() example = cherrytrack_mlwh_example(source_barcode) # inserts", "samples_collection.delete_many({}) @pytest.fixture def mlwh_samples_in_cherrytrack(app, source_barcode, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine,", "PLATES_LOOKUP_WITH_SAMPLES, PLATES_LOOKUP_WITHOUT_SAMPLES from tests.fixtures.data.priority_samples import PRIORITY_SAMPLES from tests.fixtures.data.samples import SAMPLES, rows_for_samples_in_cherrytrack from tests.fixtures.data.source_plates", "def plate_events(app): with app.app_context(): events_collection = app.data.driver.db.events inserted_events = events_collection.insert_many(PLATE_EVENTS) # yield a", "app.app_context(): events_collection = app.data.driver.db.events inserted_events = events_collection.insert_many(PLATE_EVENTS) # yield a copy of so", "mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"], ) yield finally: delete_data() def insert_into_mlwh(app, data, mlwh_sql_engine, table_name): table =", "a copy so that the test can change it however it needs priority_samples", "baracoda_mock_responses, baracoda_mock_status): for centre_prefix in baracoda_mock_responses.keys(): if baracoda_mock_responses[centre_prefix] is not None: num_samples =", "with app.app_context(): source_plates_collection.delete_many({}) @pytest.fixture def plate_events(app): with app.app_context(): 
events_collection = app.data.driver.db.events inserted_events =", "load_sql_server_script from lighthouse.helpers.dart import create_dart_connection from lighthouse.helpers.mysql import create_mysql_connection_engine, get_table from lighthouse.messages.message import", "the fixture is used with app.app_context(): source_plates_collection.delete_many({}) @pytest.fixture def plate_events(app): with app.app_context(): events_collection", "app.config[\"MLWH_STUDY_TABLE\"]) try: delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) insert_into_mlwh( app,", "@pytest.fixture def message_unknown(): message_content: EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": \"no_callbacks\",", "import ( FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER, FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME, FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER, FIELD_CHERRYTRACK_USER_ID, FIELD_SAMPLE_ID, ) from lighthouse.db.dart import load_sql_server_script from", "cherrytrack_mock_run_info( app, mocked_responses, run_id, cherrytrack_run_info_response, cherrytrack_mock_run_info_status ): run_url = f\"{app.config['CHERRYTRACK_URL']}/automation-system-runs/{run_id}\" mocked_responses.add( responses.GET, run_url,", "that the test change it however it wants yield copy.deepcopy(SOURCE_PLATES) # clear up", "def cherrytrack_mock_destination_plate_status(): return HTTPStatus.OK @pytest.fixture def baracoda_mock_status(): return HTTPStatus.CREATED @pytest.fixture def cherrytrack_mock_source_plates( app,", "= get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLES_TABLE\"]) events_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENTS_TABLE\"]) event_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENT_TYPES_TABLE\"]) subject_types_table =", "create_dart_connection() @pytest.fixture def dart_schema_create(app): with app.app_context(): 
load_sql_server_script(\"tests/data/dart/schema.sql\") @pytest.fixture def dart_samples(app, dart_schema_create): with app.app_context():", "HTTPStatus.OK @pytest.fixture def cherrytrack_mock_run_info_status(): return HTTPStatus.OK @pytest.fixture def cherrytrack_mock_destination_plate_status(): return HTTPStatus.OK @pytest.fixture def", "MLWH_SAMPLE_STOCK_RESOURCE, SAMPLES_FOR_MLWH_UPDATE, cherrytrack_mlwh_example, ) from tests.fixtures.data.plate_events import PLATE_EVENTS from tests.fixtures.data.plates_lookup import PLATES_LOOKUP_WITH_SAMPLES, PLATES_LOOKUP_WITHOUT_SAMPLES", "after the fixture is used with app.app_context(): centres_collection.delete_many({}) @pytest.fixture def samples(app): with app.app_context():", "EVENT_WH_DATA[\"event_types\"]) connection.execute(subject_types_table.insert(), EVENT_WH_DATA[\"subject_types\"]) connection.execute(subjects_table.insert(), EVENT_WH_DATA[\"subjects\"]) connection.execute(events_table.insert(), EVENT_WH_DATA[\"events\"]) connection.execute(roles_table.insert(), EVENT_WH_DATA[\"roles\"]) yield finally: delete_event_warehouse_data() @pytest.fixture", "\"test1\", \"subjects\": [], \"metadata\": {}, }, \"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture def", "mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh(", "from tests.fixtures.data.samples import SAMPLES, rows_for_samples_in_cherrytrack from tests.fixtures.data.source_plates import SOURCE_PLATES @pytest.fixture def app(): #", "get_table from lighthouse.messages.message import Message from lighthouse.types import EventMessage from tests.fixtures.data.biosero.destination_plate_wells import build_cherrytrack_destination_plate_response", "tests.fixtures.data.priority_samples import PRIORITY_SAMPLES from tests.fixtures.data.samples import 
SAMPLES, rows_for_samples_in_cherrytrack from tests.fixtures.data.source_plates import SOURCE_PLATES @pytest.fixture", "def lighthouse_ui_auth_headers(app): with app.app_context(): return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"lighthouse_ui_read_write\")} @pytest.fixture def centres(app): with app.app_context(): centres_collection", "# clear up after the fixture is used with app.app_context(): priority_samples_collection.delete_many({}) @pytest.fixture def", "run_id, cherrytrack_run_info_response, cherrytrack_mock_run_info_status ): run_url = f\"{app.config['CHERRYTRACK_URL']}/automation-system-runs/{run_id}\" mocked_responses.add( responses.GET, run_url, json=cherrytrack_run_info_response, status=cherrytrack_mock_run_info_status, )", "destination_plate_url, json=cherrytrack_destination_plate_response, status=cherrytrack_mock_destination_plate_status, ) yield @pytest.fixture def cherrytrack_run_info_response(run_id): return { \"data\": { \"id\":", "def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() example = cherrytrack_mlwh_example(source_barcode)", "inserted_samples = samples_collection.insert_many(samples) # yield a copy of so that the test change", "# set the 'EVE_SETTINGS' env variable to easily switch to the testing environment", "os from http import HTTPStatus from unittest.mock import MagicMock, patch import pytest import", "PLATES_LOOKUP_WITHOUT_SAMPLES from tests.fixtures.data.priority_samples import PRIORITY_SAMPLES from tests.fixtures.data.samples import SAMPLES, rows_for_samples_in_cherrytrack from tests.fixtures.data.source_plates import", "table_name): table = get_table(mlwh_sql_engine, table_name) with mlwh_sql_engine.begin() as connection: connection.execute(table.delete()) # delete all", "import ( COG_UK_IDS, MLWH_LH_SAMPLES, MLWH_LH_SAMPLES_MULTIPLE, 
MLWH_SAMPLE_LIGHTHOUSE_SAMPLE, MLWH_SAMPLE_STOCK_RESOURCE, SAMPLES_FOR_MLWH_UPDATE, cherrytrack_mlwh_example, ) from tests.fixtures.data.plate_events import", "@pytest.fixture def mlwh_sentinel_and_beckman_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app,", "= get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECTS_TABLE\"]) roles_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLES_TABLE\"]) events_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENTS_TABLE\"]) event_types_table =", "\"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [], \"metadata\": {}, }, \"lims\": \"LH_TEST\", } return Message(message_content)", "wants yield copy.deepcopy(PLATE_EVENTS), inserted_events # clear up after the fixture is used with", "\"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture def plates_lookup_with_samples(samples, priority_samples): return PLATES_LOOKUP_WITH_SAMPLES @pytest.fixture def", "source_plates_collection = app.data.driver.db.source_plates _ = source_plates_collection.insert_many(SOURCE_PLATES) # yield a copy of that the", "yield @pytest.fixture def baracoda_mock_barcodes_group(app, mocked_responses, baracoda_mock_responses, baracoda_mock_status): for centre_prefix in baracoda_mock_responses.keys(): if baracoda_mock_responses[centre_prefix]", "@pytest.fixture def app(): # set the 'EVE_SETTINGS' env variable to easily switch to", "{ \"role_type\": \"robot\", \"subject_type\": \"robot\", \"friendly_name\": \"robot-serial\", \"uuid\": \"00000000-1111-2222-3333-555555555557\", }, ], \"metadata\": {},", "to the testing environment when creating an app os.environ[\"EVE_SETTINGS\"] = \"test.py\" app =", "lighthouse.helpers.dart import create_dart_connection from lighthouse.helpers.mysql import 
create_mysql_connection_engine, get_table from lighthouse.messages.message import Message from", "status=HTTPStatus.OK) @pytest.fixture def samples_for_mlwh_update(): return SAMPLES_FOR_MLWH_UPDATE @pytest.fixture def cog_uk_ids(): return COG_UK_IDS # **********************", "connection: connection.execute(table.delete()) # delete all rows from table first print(\"Inserting MLWH test data\")", "def cherrytrack_mock_source_plates( app, mocked_responses, source_barcode, destination_barcode, cherrytrack_source_plates_response, cherrytrack_mock_source_plates_status, ): source_plates_url = f\"{app.config['CHERRYTRACK_URL']}/source-plates/{source_barcode}\" mocked_responses.add(", "samples_for_mlwh_update(): return SAMPLES_FOR_MLWH_UPDATE @pytest.fixture def cog_uk_ids(): return COG_UK_IDS # ********************** WAREHOUSE DATA **************************", "import os from http import HTTPStatus from unittest.mock import MagicMock, patch import pytest", "finally: delete_data() def insert_into_mlwh(app, data, mlwh_sql_engine, table_name): table = get_table(mlwh_sql_engine, table_name) with mlwh_sql_engine.begin()", "}, \"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture def plates_lookup_with_samples(samples, priority_samples): return PLATES_LOOKUP_WITH_SAMPLES @pytest.fixture", "copy so that the test change it however it wants yield copy.deepcopy(CENTRES) #", "@pytest.fixture def cherrytrack_mock_source_plates( app, mocked_responses, source_barcode, destination_barcode, cherrytrack_source_plates_response, cherrytrack_mock_source_plates_status, ): source_plates_url = f\"{app.config['CHERRYTRACK_URL']}/source-plates/{source_barcode}\"", "with app.app_context(): priority_samples_collection.delete_many({}) @pytest.fixture def source_plates(app): with app.app_context(): source_plates_collection = app.data.driver.db.source_plates _ =", "import DART_MONGO_MERGED_SAMPLES from tests.fixtures.data.event_wh import EVENT_WH_DATA from tests.fixtures.data.mlwh 
import ( COG_UK_IDS, MLWH_LH_SAMPLES, MLWH_LH_SAMPLES_MULTIPLE,", "from tests.fixtures.data.plates_lookup import PLATES_LOOKUP_WITH_SAMPLES, PLATES_LOOKUP_WITHOUT_SAMPLES from tests.fixtures.data.priority_samples import PRIORITY_SAMPLES from tests.fixtures.data.samples import SAMPLES,", "MLWH_LH_SAMPLES, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def mlwh_lh_samples_multiple(app, mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES_MULTIPLE, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def", ") yield finally: delete_data() @pytest.fixture def mlwh_beckman_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"])", "\"\"\"Easily mock responses from HTTP calls. https://github.com/getsentry/responses#responses-as-a-pytest-fixture\"\"\" with responses.RequestsMock() as rsps: yield rsps", "( f\"http://{app.config['BARACODA_URL']}\" f\"/barcodes_group/{centre_prefix}/new?count={num_samples}\" ) mocked_responses.add( responses.POST, baracoda_url, json=baracoda_mock_responses[centre_prefix], status=baracoda_mock_status, ) yield @pytest.fixture def", "PE_BECKMAN_SOURCE_ALL_NEGATIVES, PE_BECKMAN_SOURCE_COMPLETED from lighthouse.constants.fields import ( FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER, FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME, FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER, FIELD_CHERRYTRACK_USER_ID, FIELD_SAMPLE_ID, ) from", "the fixture is used with app.app_context(): centres_collection.delete_many({}) @pytest.fixture def samples(app): with app.app_context(): samples_collection", "events_collection.delete_many({}) @pytest.fixture def mocked_responses(): \"\"\"Easily mock responses from HTTP calls. 
https://github.com/getsentry/responses#responses-as-a-pytest-fixture\"\"\" with responses.RequestsMock()", "import build_cherrytrack_source_plates_response from tests.fixtures.data.centres import CENTRES from tests.fixtures.data.dart import DART_MONGO_MERGED_SAMPLES from tests.fixtures.data.event_wh import", "@pytest.fixture def mlwh_lh_samples(app, mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def mlwh_lh_samples_multiple(app, mlwh_sql_engine): insert_into_mlwh(app,", "insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"] + MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], # type:", "source_plates_url = f\"{app.config['CHERRYTRACK_URL']}/source-plates/{source_barcode}\" mocked_responses.add( responses.GET, source_plates_url, json=cherrytrack_source_plates_response, status=cherrytrack_mock_source_plates_status, ) yield @pytest.fixture def cherrytrack_mock_destination_plate(", "@pytest.fixture def cherrytrack_mock_source_plates_status(): return HTTPStatus.OK @pytest.fixture def cherrytrack_mock_run_info_status(): return HTTPStatus.OK @pytest.fixture def cherrytrack_mock_destination_plate_status():", "fixture is used finally: samples_collection.delete_many({}) @pytest.fixture def mlwh_samples_in_cherrytrack(app, source_barcode, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app,", "app.app_context(): load_sql_server_script(\"tests/data/dart/schema.sql\") @pytest.fixture def dart_samples(app, dart_schema_create): with app.app_context(): load_sql_server_script(\"tests/data/dart/seed.sql\") @pytest.fixture def dart_mongo_merged_samples(): return", "is used with app.app_context(): source_plates_collection.delete_many({}) @pytest.fixture def plate_events(app): with app.app_context(): events_collection = app.data.driver.db.events", 
"app.data.driver.db.samples inserted_samples = samples_collection.insert_many(samples) # yield a copy of so that the test", "}, { \"role_type\": \"robot\", \"subject_type\": \"robot\", \"friendly_name\": \"robot-serial\", \"uuid\": \"00000000-1111-2222-3333-555555555557\", }, ], \"metadata\":", "} ] mocked_responses.add(responses.POST, labwhere_url, json=body, status=HTTPStatus.OK) @pytest.fixture def samples_for_mlwh_update(): return SAMPLES_FOR_MLWH_UPDATE @pytest.fixture def", "def dart_mongo_merged_samples(): return DART_MONGO_MERGED_SAMPLES @pytest.fixture def event_wh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"EVENTS_WH_DB\"]) @pytest.fixture def message_unknown():", "= app.data.driver.db.samples inserted_samples = samples_collection.insert_many(SAMPLES) # yield a copy of so that the", "import MagicMock, patch import pytest import responses from lighthouse import create_app from lighthouse.constants.events", "samples inserted into mongo, currently only uses the number # of priority samples", "mocked_responses, baracoda_mock_responses, baracoda_mock_status): for centre_prefix in baracoda_mock_responses.keys(): if baracoda_mock_responses[centre_prefix] is not None: num_samples", "json=cherrytrack_destination_plate_response, status=cherrytrack_mock_destination_plate_status, ) yield @pytest.fixture def cherrytrack_run_info_response(run_id): return { \"data\": { \"id\": run_id,", "responses.GET, destination_plate_url, json=cherrytrack_destination_plate_response, status=cherrytrack_mock_destination_plate_status, ) yield @pytest.fixture def cherrytrack_run_info_response(run_id): return { \"data\": {", "it however it wants yield copy.deepcopy(SAMPLES), inserted_samples # clear up after the fixture", "= MagicMock() mocked_broker.__enter__.return_value = mocked_channel yield mocked_channel @pytest.fixture def cherrytrack_mock_run_info( app, mocked_responses, run_id,", "= app.data.driver.db.centres _ = 
centres_collection.insert_many(CENTRES) # yield a copy so that the test", "dart_schema_create(app): with app.app_context(): load_sql_server_script(\"tests/data/dart/schema.sql\") @pytest.fixture def dart_samples(app, dart_schema_create): with app.app_context(): load_sql_server_script(\"tests/data/dart/seed.sql\") @pytest.fixture def", "of so that the test change it however it wants yield copy.deepcopy(SAMPLES), inserted_samples", "@pytest.fixture def priority_samples(app, samples): _, samples = samples # create a copy so", "enumerate(priority_samples): priority_sample[FIELD_SAMPLE_ID] = samples.inserted_ids[count] with app.app_context(): priority_samples_collection = app.data.driver.db.priority_samples _ = priority_samples_collection.insert_many(priority_samples) yield", "f\"{app.config['LABWHERE_URL']}/api/labwares_by_barcode\" body = [ { \"barcode\": \"plate_123\", \"location_barcode\": \"location_123\", } ] mocked_responses.add(responses.POST, labwhere_url,", "delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) try: delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"],", "def cherrytrack_mock_run_info_status(): return HTTPStatus.OK @pytest.fixture def cherrytrack_mock_destination_plate_status(): return HTTPStatus.OK @pytest.fixture def baracoda_mock_status(): return", "however it wants yield copy.deepcopy(SOURCE_PLATES) # clear up after the fixture is used", "samples therefore PRIORITY_SAMPLES needs to be <= SAMPLES for count, priority_sample in enumerate(priority_samples):", "from tests.fixtures.data.mlwh import ( COG_UK_IDS, MLWH_LH_SAMPLES, MLWH_LH_SAMPLES_MULTIPLE, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE, MLWH_SAMPLE_STOCK_RESOURCE, SAMPLES_FOR_MLWH_UPDATE, cherrytrack_mlwh_example, ) from", "it wants yield copy.deepcopy(PLATE_EVENTS), inserted_events # clear up after the fixture is used", "= get_table(mlwh_sql_engine, table_name) with 
mlwh_sql_engine.begin() as connection: print(\"Deleting MLWH test data\") connection.execute(table.delete()) @pytest.fixture", "mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"stock_resource\"], mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"], ) yield finally: delete_data() @pytest.fixture", "event_wh_data(app, event_wh_sql_engine): try: subjects_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECTS_TABLE\"]) roles_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLES_TABLE\"]) events_table =", "subjects_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECTS_TABLE\"]) roles_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLES_TABLE\"]) events_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENTS_TABLE\"]) event_types_table", "as connection: connection.execute(table.delete()) # delete all rows from table first print(\"Inserting MLWH test", "json=baracoda_mock_responses[centre_prefix], status=baracoda_mock_status, ) yield @pytest.fixture def cherrytrack_mock_source_plates_status(): return HTTPStatus.OK @pytest.fixture def cherrytrack_mock_run_info_status(): return", "status=baracoda_mock_status, ) yield @pytest.fixture def cherrytrack_mock_source_plates_status(): return HTTPStatus.OK @pytest.fixture def cherrytrack_mock_run_info_status(): return HTTPStatus.OK", "def mlwh_sentinel_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine,", "= { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_COMPLETED, \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\":", "MLWH_SAMPLE_LIGHTHOUSE_SAMPLE, MLWH_SAMPLE_STOCK_RESOURCE, SAMPLES_FOR_MLWH_UPDATE, 
cherrytrack_mlwh_example, ) from tests.fixtures.data.plate_events import PLATE_EVENTS from tests.fixtures.data.plates_lookup import PLATES_LOOKUP_WITH_SAMPLES,", "_ = priority_samples_collection.insert_many(priority_samples) yield priority_samples # clear up after the fixture is used", "app.config[\"MLWH_STOCK_RESOURCES_TABLE\"], ) yield finally: delete_data() @pytest.fixture def mlwh_beckman_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine,", "def message_source_all_negative(): message_content: EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_ALL_NEGATIVES, \"occured_at\":", "@pytest.fixture def cherrytrack_mock_destination_plate_status(): return HTTPStatus.OK @pytest.fixture def baracoda_mock_status(): return HTTPStatus.CREATED @pytest.fixture def cherrytrack_mock_source_plates(", "app.app_context(): samples_collection = app.data.driver.db.samples inserted_samples = samples_collection.insert_many(SAMPLES) # yield a copy of so", "EVENT_WH_DATA[\"subject_types\"]) connection.execute(subjects_table.insert(), EVENT_WH_DATA[\"subjects\"]) connection.execute(events_table.insert(), EVENT_WH_DATA[\"events\"]) connection.execute(roles_table.insert(), EVENT_WH_DATA[\"roles\"]) yield finally: delete_event_warehouse_data() @pytest.fixture def mlwh_sql_engine(app):", "\"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [ { \"role_type\": \"sample\", \"subject_type\": \"sample\", \"friendly_name\": \"friendly_name\",", "mocked_responses.add( responses.GET, run_url, json=cherrytrack_run_info_response, status=cherrytrack_mock_run_info_status, ) yield @pytest.fixture def baracoda_mock_barcodes_group(app, mocked_responses, baracoda_mock_responses, baracoda_mock_status):", "insert_into_mlwh( app, example[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, example[\"sample\"], 
mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) yield", "event_wh_sql_engine.begin() as connection: print(\"Inserting Events Warehouse test data\") connection.execute(role_types_table.insert(), EVENT_WH_DATA[\"role_types\"]) connection.execute(event_types_table.insert(), EVENT_WH_DATA[\"event_types\"]) connection.execute(subject_types_table.insert(),", "samples # create a copy so that the test can change it however", "connection: connection.execute(roles_table.delete()) connection.execute(subjects_table.delete()) connection.execute(events_table.delete()) connection.execute(event_types_table.delete()) connection.execute(subject_types_table.delete()) connection.execute(role_types_table.delete()) delete_event_warehouse_data() with event_wh_sql_engine.begin() as connection: print(\"Inserting", "\"00000000-1111-2222-3333-555555555556\", }, { \"role_type\": \"robot\", \"subject_type\": \"robot\", \"friendly_name\": \"robot-serial\", \"uuid\": \"00000000-1111-2222-3333-555555555557\", }, ],", "@pytest.fixture def mlwh_samples_in_cherrytrack(app, source_barcode, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"])", ") yield finally: delete_data() def insert_into_mlwh(app, data, mlwh_sql_engine, table_name): table = get_table(mlwh_sql_engine, table_name)", "from HTTP calls. 
https://github.com/getsentry/responses#responses-as-a-pytest-fixture\"\"\" with responses.RequestsMock() as rsps: yield rsps @pytest.fixture def labwhere_samples_simple(app,", "delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() # inserts insert_into_mlwh( app,", ") insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) yield finally: delete_data() @pytest.fixture def mlwh_sentinel_and_beckman_cherrypicked(app,", "[], \"metadata\": {}, }, \"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture def message_source_complete(): message_content:", "test change it however it wants yield copy.deepcopy(PLATE_EVENTS), inserted_events # clear up after", "body = [ { \"barcode\": \"plate_123\", \"location_barcode\": \"location_123\", } ] mocked_responses.add(responses.POST, labwhere_url, json=body,", "priority_sample[FIELD_SAMPLE_ID] = samples.inserted_ids[count] with app.app_context(): priority_samples_collection = app.data.driver.db.priority_samples _ = priority_samples_collection.insert_many(priority_samples) yield priority_samples", "data\") connection.execute(table.delete()) @pytest.fixture def event_wh_data(app, event_wh_sql_engine): try: subjects_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECTS_TABLE\"]) roles_table =", "= mocked_channel yield mocked_channel @pytest.fixture def cherrytrack_mock_run_info( app, mocked_responses, run_id, cherrytrack_run_info_response, cherrytrack_mock_run_info_status ):", "f\"{app.config['CHERRYTRACK_URL']}/automation-system-runs/{run_id}\" mocked_responses.add( responses.GET, run_url, json=cherrytrack_run_info_response, status=cherrytrack_mock_run_info_status, ) yield @pytest.fixture def baracoda_mock_barcodes_group(app, mocked_responses, baracoda_mock_responses,", "MagicMock() 
mocked_broker.__enter__.return_value = mocked_channel yield mocked_channel @pytest.fixture def cherrytrack_mock_run_info( app, mocked_responses, run_id, cherrytrack_run_info_response,", "try: delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"study\"],", "@pytest.fixture def cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id): return build_cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id) def cherrytrack_destination_plate_response_duplicated_wells(cherrytrack_destination_plate_response): cherrytrack_destination_plate_response[\"wells\"][0][\"destination_coordinate\"] =", "= get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLE_TYPES_TABLE\"]) def delete_event_warehouse_data(): with event_wh_sql_engine.begin() as connection: connection.execute(roles_table.delete()) connection.execute(subjects_table.delete()) connection.execute(events_table.delete()) connection.execute(event_types_table.delete())", "try: delete_data() example = cherrytrack_mlwh_example(source_barcode) # inserts insert_into_mlwh( app, example[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], )", "tests.fixtures.data.source_plates import SOURCE_PLATES @pytest.fixture def app(): # set the 'EVE_SETTINGS' env variable to", "message_content: EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_COMPLETED, \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\":", "mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"study\"], mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"stock_resource\"], mlwh_sql_engine,", "from tests.fixtures.data.dart import DART_MONGO_MERGED_SAMPLES from 
tests.fixtures.data.event_wh import EVENT_WH_DATA from tests.fixtures.data.mlwh import ( COG_UK_IDS,", "} return Message(message_content) @pytest.fixture def message_source_all_negative(): message_content: EventMessage = { \"event\": { \"uuid\":", "role_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLE_TYPES_TABLE\"]) def delete_event_warehouse_data(): with event_wh_sql_engine.begin() as connection: connection.execute(roles_table.delete()) connection.execute(subjects_table.delete()) connection.execute(events_table.delete())", "responses.GET, run_url, json=cherrytrack_run_info_response, status=cherrytrack_mock_run_info_status, ) yield @pytest.fixture def baracoda_mock_barcodes_group(app, mocked_responses, baracoda_mock_responses, baracoda_mock_status): for", "MLWH_SAMPLE_STOCK_RESOURCE[\"study\"], mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"stock_resource\"], mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"], ) yield finally: delete_data()", "def plates_lookup_without_samples(samples, priority_samples): return PLATES_LOOKUP_WITHOUT_SAMPLES @pytest.fixture def mocked_rabbit_channel(app): with app.app_context(): mocked_broker = MagicMock()", "samples_collection.insert_many(samples) # yield a copy of so that the test change it however", "\"location_barcode\": \"location_123\", } ] mocked_responses.add(responses.POST, labwhere_url, json=body, status=HTTPStatus.OK) @pytest.fixture def samples_for_mlwh_update(): return SAMPLES_FOR_MLWH_UPDATE", "app.config[\"EVENT_WH_ROLES_TABLE\"]) events_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENTS_TABLE\"]) event_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENT_TYPES_TABLE\"]) subject_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECT_TYPES_TABLE\"])", "uses the number # of priority samples therefore PRIORITY_SAMPLES needs to be <=", "app.config[\"MLWH_SAMPLE_TABLE\"]) 
delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() # inserts insert_into_mlwh( app,", "from lighthouse.constants.fields import ( FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER, FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME, FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER, FIELD_CHERRYTRACK_USER_ID, FIELD_SAMPLE_ID, ) from lighthouse.db.dart import", "def mlwh_sentinel_and_beckman_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine,", "def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine,", "message_unknown(): message_content: EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": \"no_callbacks\", \"occured_at\": \"2020-11-26T15:58:20\",", "{ \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_ALL_NEGATIVES, \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [ { \"role_type\":", "source_barcode, run_id): return build_cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id) def cherrytrack_destination_plate_response_duplicated_wells(cherrytrack_destination_plate_response): cherrytrack_destination_plate_response[\"wells\"][0][\"destination_coordinate\"] = \"H12\" return cherrytrack_destination_plate_response", "delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) 
insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], mlwh_sql_engine,", "inserts insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"study\"], mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"], )", "number # of priority samples therefore PRIORITY_SAMPLES needs to be <= SAMPLES for", "@pytest.fixture def dart_schema_create(app): with app.app_context(): load_sql_server_script(\"tests/data/dart/schema.sql\") @pytest.fixture def dart_samples(app, dart_schema_create): with app.app_context(): load_sql_server_script(\"tests/data/dart/seed.sql\")", "def client(app): return app.test_client() @pytest.fixture def biosero_auth_headers(app): with app.app_context(): return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"biosero_read_write\")} @pytest.fixture", "source_barcode, destination_barcode, cherrytrack_source_plates_response, cherrytrack_mock_source_plates_status, ): source_plates_url = f\"{app.config['CHERRYTRACK_URL']}/source-plates/{source_barcode}\" mocked_responses.add( responses.GET, source_plates_url, json=cherrytrack_source_plates_response, status=cherrytrack_mock_source_plates_status,", "create_mysql_connection_engine, get_table from lighthouse.messages.message import Message from lighthouse.types import EventMessage from tests.fixtures.data.biosero.destination_plate_wells import", "\"robot\", \"subject_type\": \"robot\", \"friendly_name\": \"robot-serial\", \"uuid\": \"00000000-1111-2222-3333-555555555557\", }, ], \"metadata\": {}, }, \"lims\":", "clear up after the fixture is used with app.app_context(): source_plates_collection.delete_many({}) @pytest.fixture def plate_events(app):", "clear up after the fixture is used with app.app_context(): centres_collection.delete_many({}) @pytest.fixture def samples(app):", "samples_collection = app.data.driver.db.samples inserted_samples = 
samples_collection.insert_many(SAMPLES) # yield a copy of so that", "test can change it however it needs priority_samples = copy.deepcopy(PRIORITY_SAMPLES) # update the", "currently only uses the number # of priority samples therefore PRIORITY_SAMPLES needs to", "= f\"{app.config['LABWHERE_URL']}/api/labwares_by_barcode\" body = [ { \"barcode\": \"plate_123\", \"location_barcode\": \"location_123\", } ] mocked_responses.add(responses.POST,", "= events_collection.insert_many(PLATE_EVENTS) # yield a copy of so that the test change it", "mocked_responses(): \"\"\"Easily mock responses from HTTP calls. https://github.com/getsentry/responses#responses-as-a-pytest-fixture\"\"\" with responses.RequestsMock() as rsps: yield", "mocked_responses.add(responses.POST, labwhere_url, json=body, status=HTTPStatus.OK) @pytest.fixture def samples_for_mlwh_update(): return SAMPLES_FOR_MLWH_UPDATE @pytest.fixture def cog_uk_ids(): return", "app.config[\"EVENT_WH_EVENTS_TABLE\"]) event_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENT_TYPES_TABLE\"]) subject_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECT_TYPES_TABLE\"]) role_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLE_TYPES_TABLE\"])", "insert_into_mlwh(app, MLWH_LH_SAMPLES, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def mlwh_lh_samples_multiple(app, mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES_MULTIPLE, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture", "{ \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_COMPLETED, \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [", "app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def mlwh_sentinel_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) 
delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"])", "app.app_context(): mocked_broker = MagicMock() with patch(\"lighthouse.classes.services.warehouse.Broker\", return_value=mocked_broker): mocked_channel = MagicMock() mocked_broker.__enter__.return_value = mocked_channel", "after the fixture is used with app.app_context(): source_plates_collection.delete_many({}) @pytest.fixture def plate_events(app): with app.app_context():", "= MagicMock() with patch(\"lighthouse.classes.services.warehouse.Broker\", return_value=mocked_broker): mocked_channel = MagicMock() mocked_broker.__enter__.return_value = mocked_channel yield mocked_channel", "the test change it however it wants yield copy.deepcopy(PLATE_EVENTS), inserted_events # clear up", "mlwh_sql_engine.begin() as connection: print(\"Deleting MLWH test data\") connection.execute(table.delete()) @pytest.fixture def event_wh_data(app, event_wh_sql_engine): try:", "tests.fixtures.data.plate_events import PLATE_EVENTS from tests.fixtures.data.plates_lookup import PLATES_LOOKUP_WITH_SAMPLES, PLATES_LOOKUP_WITHOUT_SAMPLES from tests.fixtures.data.priority_samples import PRIORITY_SAMPLES from", "used with app.app_context(): centres_collection.delete_many({}) @pytest.fixture def samples(app): with app.app_context(): samples_collection = app.data.driver.db.samples inserted_samples", "roles_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLES_TABLE\"]) events_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENTS_TABLE\"]) event_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENT_TYPES_TABLE\"]) subject_types_table", "change it however it wants yield copy.deepcopy(samples), inserted_samples # clear up after the", "\"role_type\": \"sample\", \"subject_type\": \"sample\", \"friendly_name\": \"friendly_name\", \"uuid\": \"00000000-1111-2222-3333-555555555555\", }, { \"role_type\": \"cherrypicking_source_labware\", \"subject_type\":", 
"mlwh_sentinel_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"])", "the test can change it however it needs priority_samples = copy.deepcopy(PRIORITY_SAMPLES) # update", "to be <= SAMPLES for count, priority_sample in enumerate(priority_samples): priority_sample[FIELD_SAMPLE_ID] = samples.inserted_ids[count] with", "delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) try: delete_data() # inserts", "# clear up after the fixture is used with app.app_context(): samples_collection.delete_many({}) @pytest.fixture def", "calls. https://github.com/getsentry/responses#responses-as-a-pytest-fixture\"\"\" with responses.RequestsMock() as rsps: yield rsps @pytest.fixture def labwhere_samples_simple(app, mocked_responses): labwhere_url", "be <= SAMPLES for count, priority_sample in enumerate(priority_samples): priority_sample[FIELD_SAMPLE_ID] = samples.inserted_ids[count] with app.app_context():", "SAMPLES_FOR_MLWH_UPDATE @pytest.fixture def cog_uk_ids(): return COG_UK_IDS # ********************** WAREHOUSE DATA ************************** # @pytest.fixture", "mock responses from HTTP calls. 
https://github.com/getsentry/responses#responses-as-a-pytest-fixture\"\"\" with responses.RequestsMock() as rsps: yield rsps @pytest.fixture", "baracoda_url = ( f\"http://{app.config['BARACODA_URL']}\" f\"/barcodes_group/{centre_prefix}/new?count={num_samples}\" ) mocked_responses.add( responses.POST, baracoda_url, json=baracoda_mock_responses[centre_prefix], status=baracoda_mock_status, ) yield", "\"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [ { \"role_type\": \"cherrypicking_source_labware\", \"subject_type\": \"plate\", \"friendly_name\": \"plate-barcode\", \"uuid\":", "return { \"data\": { \"id\": run_id, FIELD_CHERRYTRACK_USER_ID: \"user1\", FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER: \"aLiquidHandlerSerialNumber\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER: \"biosero\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME:", "that the test change it however it wants yield copy.deepcopy(CENTRES) # clear up", "\"user1\", FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER: \"aLiquidHandlerSerialNumber\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER: \"biosero\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME: \"CPA\", } } @pytest.fixture def cherrytrack_destination_plate_response(destination_barcode, source_barcode,", "connection: print(\"Deleting MLWH test data\") connection.execute(table.delete()) @pytest.fixture def event_wh_data(app, event_wh_sql_engine): try: subjects_table =", "connection.execute(events_table.insert(), EVENT_WH_DATA[\"events\"]) connection.execute(roles_table.insert(), EVENT_WH_DATA[\"roles\"]) yield finally: delete_event_warehouse_data() @pytest.fixture def mlwh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"MLWH_DB\"])", "the samples inserted into mongo, currently only uses the number # of priority", "finally: delete_data() @pytest.fixture def mlwh_sentinel_and_beckman_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, 
mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine,", "MLWH test data\") connection.execute(table.delete()) @pytest.fixture def event_wh_data(app, event_wh_sql_engine): try: subjects_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECTS_TABLE\"])", "event_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENT_TYPES_TABLE\"]) subject_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECT_TYPES_TABLE\"]) role_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLE_TYPES_TABLE\"]) def", "so that the test change it however it wants yield copy.deepcopy(samples), inserted_samples #", "\"subject_type\": \"plate\", \"friendly_name\": \"plate-barcode\", \"uuid\": \"00000000-1111-2222-3333-555555555556\", }, { \"role_type\": \"robot\", \"subject_type\": \"robot\", \"friendly_name\":", "mlwh_sql_engine, table_name): table = get_table(mlwh_sql_engine, table_name) with mlwh_sql_engine.begin() as connection: print(\"Deleting MLWH test", "@pytest.fixture def samples(app): with app.app_context(): samples_collection = app.data.driver.db.samples inserted_samples = samples_collection.insert_many(SAMPLES) # yield", "MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"] + MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], # type: ignore mlwh_sql_engine,", "@pytest.fixture def plates_lookup_with_samples(samples, priority_samples): return PLATES_LOOKUP_WITH_SAMPLES @pytest.fixture def plates_lookup_without_samples(samples, priority_samples): return PLATES_LOOKUP_WITHOUT_SAMPLES @pytest.fixture", "return app.test_client() @pytest.fixture def biosero_auth_headers(app): with app.app_context(): return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"biosero_read_write\")} @pytest.fixture def lighthouse_ui_auth_headers(app):", "@pytest.fixture 
def dart_mongo_merged_samples(): return DART_MONGO_MERGED_SAMPLES @pytest.fixture def event_wh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"EVENTS_WH_DB\"]) @pytest.fixture def", "@pytest.fixture def biosero_auth_headers(app): with app.app_context(): return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"biosero_read_write\")} @pytest.fixture def lighthouse_ui_auth_headers(app): with app.app_context():", "def cherrytrack_mock_source_plates_status(): return HTTPStatus.OK @pytest.fixture def cherrytrack_mock_run_info_status(): return HTTPStatus.OK @pytest.fixture def cherrytrack_mock_destination_plate_status(): return", "@pytest.fixture def source_plates(app): with app.app_context(): source_plates_collection = app.data.driver.db.source_plates _ = source_plates_collection.insert_many(SOURCE_PLATES) # yield", "# update the priority samples with the _id of the samples inserted into", "used with app.app_context(): priority_samples_collection.delete_many({}) @pytest.fixture def source_plates(app): with app.app_context(): source_plates_collection = app.data.driver.db.source_plates _", "count, priority_sample in enumerate(priority_samples): priority_sample[FIELD_SAMPLE_ID] = samples.inserted_ids[count] with app.app_context(): priority_samples_collection = app.data.driver.db.priority_samples _", "delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() # inserts", "a copy of so that the test change it however it wants yield", "with event_wh_sql_engine.begin() as connection: print(\"Inserting Events Warehouse test data\") connection.execute(role_types_table.insert(), EVENT_WH_DATA[\"role_types\"]) connection.execute(event_types_table.insert(), EVENT_WH_DATA[\"event_types\"])", "test change it however it 
wants yield copy.deepcopy(SOURCE_PLATES) # clear up after the", "{ \"role_type\": \"cherrypicking_source_labware\", \"subject_type\": \"plate\", \"friendly_name\": \"plate-barcode\", \"uuid\": \"00000000-1111-2222-3333-555555555556\", }, { \"role_type\": \"robot\",", "destination_barcode) @pytest.fixture def samples_from_cherrytrack_into_mongo(app, source_barcode): try: samples = rows_for_samples_in_cherrytrack(source_barcode) with app.app_context(): samples_collection =", "finally: delete_data() @pytest.fixture def mlwh_beckman_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine,", "import create_dart_connection from lighthouse.helpers.mysql import create_mysql_connection_engine, get_table from lighthouse.messages.message import Message from lighthouse.types", "HTTPStatus from unittest.mock import MagicMock, patch import pytest import responses from lighthouse import", "delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) try: delete_data() #", "return build_cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode) @pytest.fixture def samples_from_cherrytrack_into_mongo(app, source_barcode): try: samples = rows_for_samples_in_cherrytrack(source_barcode) with", "return Message(message_content) @pytest.fixture def message_source_all_negative(): message_content: EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\",", "\"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": \"no_callbacks\", \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [], \"metadata\": {}, }, \"lims\":", "def dart_samples(app, dart_schema_create): with app.app_context(): 
load_sql_server_script(\"tests/data/dart/seed.sql\") @pytest.fixture def dart_mongo_merged_samples(): return DART_MONGO_MERGED_SAMPLES @pytest.fixture def", "easily switch to the testing environment when creating an app os.environ[\"EVE_SETTINGS\"] = \"test.py\"", "labwhere_url = f\"{app.config['LABWHERE_URL']}/api/labwares_by_barcode\" body = [ { \"barcode\": \"plate_123\", \"location_barcode\": \"location_123\", } ]", "baracoda_url, json=baracoda_mock_responses[centre_prefix], status=baracoda_mock_status, ) yield @pytest.fixture def cherrytrack_mock_source_plates_status(): return HTTPStatus.OK @pytest.fixture def cherrytrack_mock_run_info_status():", "mocked_broker.__enter__.return_value = mocked_channel yield mocked_channel @pytest.fixture def cherrytrack_mock_run_info( app, mocked_responses, run_id, cherrytrack_run_info_response, cherrytrack_mock_run_info_status", "len(baracoda_mock_responses[centre_prefix][\"barcodes_group\"][\"barcodes\"]) baracoda_url = ( f\"http://{app.config['BARACODA_URL']}\" f\"/barcodes_group/{centre_prefix}/new?count={num_samples}\" ) mocked_responses.add( responses.POST, baracoda_url, json=baracoda_mock_responses[centre_prefix], status=baracoda_mock_status, )", "\"location_123\", } ] mocked_responses.add(responses.POST, labwhere_url, json=body, status=HTTPStatus.OK) @pytest.fixture def samples_for_mlwh_update(): return SAMPLES_FOR_MLWH_UPDATE @pytest.fixture", "insert_into_mlwh(app, MLWH_LH_SAMPLES_MULTIPLE, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def mlwh_sentinel_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"])", "import responses from lighthouse import create_app from lighthouse.constants.events import PE_BECKMAN_SOURCE_ALL_NEGATIVES, PE_BECKMAN_SOURCE_COMPLETED from lighthouse.constants.fields", "however it wants yield copy.deepcopy(samples), inserted_samples # clear up after the fixture is", "return 
Message(message_content) @pytest.fixture def message_source_complete(): message_content: EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\",", "of so that the test change it however it wants yield copy.deepcopy(samples), inserted_samples", "@pytest.fixture def mlwh_lh_samples_multiple(app, mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES_MULTIPLE, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def mlwh_sentinel_cherrypicked(app, mlwh_sql_engine): def", "environment when creating an app os.environ[\"EVE_SETTINGS\"] = \"test.py\" app = create_app() yield app", "from tests.fixtures.data.biosero.source_plate_wells import build_cherrytrack_source_plates_response from tests.fixtures.data.centres import CENTRES from tests.fixtures.data.dart import DART_MONGO_MERGED_SAMPLES from", "app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"] + MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], # type: ignore mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], )", "status=cherrytrack_mock_source_plates_status, ) yield @pytest.fixture def cherrytrack_mock_destination_plate( app, mocked_responses, destination_barcode, cherrytrack_destination_plate_response, cherrytrack_mock_destination_plate_status, ): destination_plate_url", ") from tests.fixtures.data.plate_events import PLATE_EVENTS from tests.fixtures.data.plates_lookup import PLATES_LOOKUP_WITH_SAMPLES, PLATES_LOOKUP_WITHOUT_SAMPLES from tests.fixtures.data.priority_samples import", "yield app @pytest.fixture def client(app): return app.test_client() @pytest.fixture def biosero_auth_headers(app): with app.app_context(): return", "def labwhere_samples_simple(app, mocked_responses): labwhere_url = f\"{app.config['LABWHERE_URL']}/api/labwares_by_barcode\" body = [ { \"barcode\": \"plate_123\", \"location_barcode\":", "in baracoda_mock_responses.keys(): if baracoda_mock_responses[centre_prefix] is not None: 
num_samples = len(baracoda_mock_responses[centre_prefix][\"barcodes_group\"][\"barcodes\"]) baracoda_url = (", "mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def mlwh_lh_samples_multiple(app, mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES_MULTIPLE, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def mlwh_sentinel_cherrypicked(app,", "mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) yield finally: delete_data() @pytest.fixture", "cherrytrack_destination_plate_response, cherrytrack_mock_destination_plate_status, ): destination_plate_url = f\"{app.config['CHERRYTRACK_URL']}/destination-plates/{destination_barcode}\" mocked_responses.add( responses.GET, destination_plate_url, json=cherrytrack_destination_plate_response, status=cherrytrack_mock_destination_plate_status, ) yield", "with mlwh_sql_engine.begin() as connection: print(\"Deleting MLWH test data\") connection.execute(table.delete()) @pytest.fixture def event_wh_data(app, event_wh_sql_engine):", "from table first print(\"Inserting MLWH test data\") connection.execute(table.insert(), data) def delete_from_mlwh(app, mlwh_sql_engine, table_name):", ") yield @pytest.fixture def cherrytrack_run_info_response(run_id): return { \"data\": { \"id\": run_id, FIELD_CHERRYTRACK_USER_ID: \"user1\",", "inserted_samples = samples_collection.insert_many(SAMPLES) # yield a copy of so that the test change", "\"friendly_name\", \"uuid\": \"00000000-1111-2222-3333-555555555555\", }, { \"role_type\": \"cherrypicking_source_labware\", \"subject_type\": \"plate\", \"friendly_name\": \"plate-barcode\", \"uuid\": \"00000000-1111-2222-3333-555555555556\",", "def priority_samples(app, samples): _, samples = samples # create a copy so that", "rows_for_samples_in_cherrytrack(source_barcode) with app.app_context(): samples_collection = 
app.data.driver.db.samples inserted_samples = samples_collection.insert_many(samples) # yield a copy", "responses.GET, source_plates_url, json=cherrytrack_source_plates_response, status=cherrytrack_mock_source_plates_status, ) yield @pytest.fixture def cherrytrack_mock_destination_plate( app, mocked_responses, destination_barcode, cherrytrack_destination_plate_response,", "# inserts insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"] + MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"],", "testing environment when creating an app os.environ[\"EVE_SETTINGS\"] = \"test.py\" app = create_app() yield", "Message(message_content) @pytest.fixture def plates_lookup_with_samples(samples, priority_samples): return PLATES_LOOKUP_WITH_SAMPLES @pytest.fixture def plates_lookup_without_samples(samples, priority_samples): return PLATES_LOOKUP_WITHOUT_SAMPLES", "tests.fixtures.data.biosero.destination_plate_wells import build_cherrytrack_destination_plate_response from tests.fixtures.data.biosero.source_plate_wells import build_cherrytrack_source_plates_response from tests.fixtures.data.centres import CENTRES from tests.fixtures.data.dart", "@pytest.fixture def lighthouse_ui_auth_headers(app): with app.app_context(): return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"lighthouse_ui_read_write\")} @pytest.fixture def centres(app): with app.app_context():", "app.app_context(): priority_samples_collection = app.data.driver.db.priority_samples _ = priority_samples_collection.insert_many(priority_samples) yield priority_samples # clear up after", "def delete_event_warehouse_data(): with event_wh_sql_engine.begin() as connection: connection.execute(roles_table.delete()) connection.execute(subjects_table.delete()) connection.execute(events_table.delete()) connection.execute(event_types_table.delete()) 
connection.execute(subject_types_table.delete()) connection.execute(role_types_table.delete()) delete_event_warehouse_data()", "mocked_responses.add( responses.GET, destination_plate_url, json=cherrytrack_destination_plate_response, status=cherrytrack_mock_destination_plate_status, ) yield @pytest.fixture def cherrytrack_run_info_response(run_id): return { \"data\":", "FIELD_CHERRYTRACK_USER_ID: \"user1\", FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER: \"aLiquidHandlerSerialNumber\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER: \"biosero\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME: \"CPA\", } } @pytest.fixture def cherrytrack_destination_plate_response(destination_barcode,", "data\") connection.execute(table.insert(), data) def delete_from_mlwh(app, mlwh_sql_engine, table_name): table = get_table(mlwh_sql_engine, table_name) with mlwh_sql_engine.begin()", "def event_wh_data(app, event_wh_sql_engine): try: subjects_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECTS_TABLE\"]) roles_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLES_TABLE\"]) events_table", "WAREHOUSE DATA ************************** # @pytest.fixture def mlwh_lh_samples(app, mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture", "cherrytrack_run_info_response, cherrytrack_mock_run_info_status ): run_url = f\"{app.config['CHERRYTRACK_URL']}/automation-system-runs/{run_id}\" mocked_responses.add( responses.GET, run_url, json=cherrytrack_run_info_response, status=cherrytrack_mock_run_info_status, ) yield", "with the _id of the samples inserted into mongo, currently only uses the", "mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) try: delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) insert_into_mlwh(", "return {\"Authorization\": 
app.config.get(\"API_TOKENS_EVENTS\").get(\"lighthouse_ui_read_write\")} @pytest.fixture def centres(app): with app.app_context(): centres_collection = app.data.driver.db.centres _ =", "centre_prefix in baracoda_mock_responses.keys(): if baracoda_mock_responses[centre_prefix] is not None: num_samples = len(baracoda_mock_responses[centre_prefix][\"barcodes_group\"][\"barcodes\"]) baracoda_url =", ") insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"] + MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], # type: ignore mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) insert_into_mlwh(", "source_barcode, run_id) def cherrytrack_destination_plate_response_duplicated_wells(cherrytrack_destination_plate_response): cherrytrack_destination_plate_response[\"wells\"][0][\"destination_coordinate\"] = \"H12\" return cherrytrack_destination_plate_response @pytest.fixture def cherrytrack_source_plates_response(run_id, source_barcode,", "client(app): return app.test_client() @pytest.fixture def biosero_auth_headers(app): with app.app_context(): return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"biosero_read_write\")} @pytest.fixture def", "with responses.RequestsMock() as rsps: yield rsps @pytest.fixture def labwhere_samples_simple(app, mocked_responses): labwhere_url = f\"{app.config['LABWHERE_URL']}/api/labwares_by_barcode\"", "import create_mysql_connection_engine, get_table from lighthouse.messages.message import Message from lighthouse.types import EventMessage from tests.fixtures.data.biosero.destination_plate_wells", "the test change it however it wants yield copy.deepcopy(SAMPLES), inserted_samples # clear up", "data, mlwh_sql_engine, table_name): table = get_table(mlwh_sql_engine, table_name) with mlwh_sql_engine.begin() as connection: connection.execute(table.delete()) #", "EVENT_WH_DATA[\"roles\"]) yield finally: delete_event_warehouse_data() @pytest.fixture def mlwh_sql_engine(app): return 
create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"MLWH_DB\"]) @pytest.fixture def dart_connection(app):", "yield @pytest.fixture def cherrytrack_mock_source_plates_status(): return HTTPStatus.OK @pytest.fixture def cherrytrack_mock_run_info_status(): return HTTPStatus.OK @pytest.fixture def", "create_app from lighthouse.constants.events import PE_BECKMAN_SOURCE_ALL_NEGATIVES, PE_BECKMAN_SOURCE_COMPLETED from lighthouse.constants.fields import ( FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER, FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME, FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER,", "labwhere_samples_simple(app, mocked_responses): labwhere_url = f\"{app.config['LABWHERE_URL']}/api/labwares_by_barcode\" body = [ { \"barcode\": \"plate_123\", \"location_barcode\": \"location_123\",", "\"robot-serial\", \"uuid\": \"00000000-1111-2222-3333-555555555557\", }, ], \"metadata\": {}, }, \"lims\": \"LH_TEST\", } return Message(message_content)", "env variable to easily switch to the testing environment when creating an app", "\"uuid\": \"00000000-1111-2222-3333-555555555557\", }, ], \"metadata\": {}, }, \"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture", "cherrytrack_mlwh_example, ) from tests.fixtures.data.plate_events import PLATE_EVENTS from tests.fixtures.data.plates_lookup import PLATES_LOOKUP_WITH_SAMPLES, PLATES_LOOKUP_WITHOUT_SAMPLES from tests.fixtures.data.priority_samples", "@pytest.fixture def mlwh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"MLWH_DB\"]) @pytest.fixture def dart_connection(app): return create_dart_connection() @pytest.fixture def", "= { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_ALL_NEGATIVES, \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\":", "it needs priority_samples = copy.deepcopy(PRIORITY_SAMPLES) # update the priority 
samples with the _id", "{ \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_COMPLETED, \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [ { \"role_type\":", "\"plate-barcode\", \"uuid\": \"00000000-1111-2222-3333-555555555556\", }, { \"role_type\": \"robot\", \"subject_type\": \"robot\", \"friendly_name\": \"robot-serial\", \"uuid\": \"00000000-1111-2222-3333-555555555557\",", "MagicMock() with patch(\"lighthouse.classes.services.warehouse.Broker\", return_value=mocked_broker): mocked_channel = MagicMock() mocked_broker.__enter__.return_value = mocked_channel yield mocked_channel @pytest.fixture", "priority samples therefore PRIORITY_SAMPLES needs to be <= SAMPLES for count, priority_sample in", "def mlwh_beckman_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data()", "events_collection.insert_many(PLATE_EVENTS) # yield a copy of so that the test change it however", "= { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": \"no_callbacks\", \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\":", "connection: print(\"Inserting Events Warehouse test data\") connection.execute(role_types_table.insert(), EVENT_WH_DATA[\"role_types\"]) connection.execute(event_types_table.insert(), EVENT_WH_DATA[\"event_types\"]) connection.execute(subject_types_table.insert(), EVENT_WH_DATA[\"subject_types\"]) connection.execute(subjects_table.insert(),", "patch(\"lighthouse.classes.services.warehouse.Broker\", return_value=mocked_broker): mocked_channel = MagicMock() mocked_broker.__enter__.return_value = mocked_channel yield mocked_channel @pytest.fixture def cherrytrack_mock_run_info(", "from tests.fixtures.data.source_plates import SOURCE_PLATES @pytest.fixture def app(): # 
set the 'EVE_SETTINGS' env variable", "that the test can change it however it needs priority_samples = copy.deepcopy(PRIORITY_SAMPLES) #", "app.config[\"EVENT_WH_ROLE_TYPES_TABLE\"]) def delete_event_warehouse_data(): with event_wh_sql_engine.begin() as connection: connection.execute(roles_table.delete()) connection.execute(subjects_table.delete()) connection.execute(events_table.delete()) connection.execute(event_types_table.delete()) connection.execute(subject_types_table.delete()) connection.execute(role_types_table.delete())", "yield copy.deepcopy(PLATE_EVENTS), inserted_events # clear up after the fixture is used with app.app_context():", "mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def mlwh_lh_samples_multiple(app, mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES_MULTIPLE, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"])", "data) def delete_from_mlwh(app, mlwh_sql_engine, table_name): table = get_table(mlwh_sql_engine, table_name) with mlwh_sql_engine.begin() as connection:", "app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) yield finally: delete_data() @pytest.fixture def mlwh_sentinel_and_beckman_cherrypicked(app, mlwh_sql_engine): def", "type: ignore mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"study\"], mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"], ) insert_into_mlwh( app,", "delete_from_mlwh(app, mlwh_sql_engine, table_name): table = get_table(mlwh_sql_engine, table_name) with mlwh_sql_engine.begin() as connection: print(\"Deleting MLWH", "test data\") connection.execute(role_types_table.insert(), EVENT_WH_DATA[\"role_types\"]) connection.execute(event_types_table.insert(), EVENT_WH_DATA[\"event_types\"]) connection.execute(subject_types_table.insert(), EVENT_WH_DATA[\"subject_types\"]) 
connection.execute(subjects_table.insert(), EVENT_WH_DATA[\"subjects\"]) connection.execute(events_table.insert(), EVENT_WH_DATA[\"events\"]) connection.execute(roles_table.insert(),", "message_content: EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": \"no_callbacks\", \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\":", "yield finally: delete_data() @pytest.fixture def mlwh_beckman_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app,", "cherrytrack_mock_source_plates_status, ): source_plates_url = f\"{app.config['CHERRYTRACK_URL']}/source-plates/{source_barcode}\" mocked_responses.add( responses.GET, source_plates_url, json=cherrytrack_source_plates_response, status=cherrytrack_mock_source_plates_status, ) yield @pytest.fixture", "priority_samples(app, samples): _, samples = samples # create a copy so that the", "\"metadata\": {}, }, \"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture def message_source_all_negative(): message_content: EventMessage", "yield @pytest.fixture def cherrytrack_run_info_response(run_id): return { \"data\": { \"id\": run_id, FIELD_CHERRYTRACK_USER_ID: \"user1\", FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER:", "SAMPLES_FOR_MLWH_UPDATE, cherrytrack_mlwh_example, ) from tests.fixtures.data.plate_events import PLATE_EVENTS from tests.fixtures.data.plates_lookup import PLATES_LOOKUP_WITH_SAMPLES, PLATES_LOOKUP_WITHOUT_SAMPLES from", "# inserts insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"study\"], mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"],", "delete_data() def insert_into_mlwh(app, data, mlwh_sql_engine, table_name): table = get_table(mlwh_sql_engine, table_name) with mlwh_sql_engine.begin() as", "mlwh_sql_engine): def delete_data(): 
delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) delete_from_mlwh(app,", "insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"stock_resource\"], mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"], ) yield finally: delete_data() def insert_into_mlwh(app, data, mlwh_sql_engine,", "json=body, status=HTTPStatus.OK) @pytest.fixture def samples_for_mlwh_update(): return SAMPLES_FOR_MLWH_UPDATE @pytest.fixture def cog_uk_ids(): return COG_UK_IDS #", "import HTTPStatus from unittest.mock import MagicMock, patch import pytest import responses from lighthouse", "events_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENTS_TABLE\"]) event_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENT_TYPES_TABLE\"]) subject_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECT_TYPES_TABLE\"]) role_types_table", "app, example[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, example[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) yield finally:", "is used with app.app_context(): events_collection.delete_many({}) @pytest.fixture def mocked_responses(): \"\"\"Easily mock responses from HTTP", "app.data.driver.db.priority_samples _ = priority_samples_collection.insert_many(priority_samples) yield priority_samples # clear up after the fixture is", "mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"], ) yield finally: delete_data() @pytest.fixture def mlwh_beckman_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app,", "yield finally: delete_event_warehouse_data() @pytest.fixture def mlwh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"MLWH_DB\"]) @pytest.fixture def 
dart_connection(app): return", "so that the test can change it however it needs priority_samples = copy.deepcopy(PRIORITY_SAMPLES)", "= priority_samples_collection.insert_many(priority_samples) yield priority_samples # clear up after the fixture is used with", "return DART_MONGO_MERGED_SAMPLES @pytest.fixture def event_wh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"EVENTS_WH_DB\"]) @pytest.fixture def message_unknown(): message_content: EventMessage", "change it however it wants yield copy.deepcopy(CENTRES) # clear up after the fixture", "inserts insert_into_mlwh( app, example[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, example[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], )", "f\"/barcodes_group/{centre_prefix}/new?count={num_samples}\" ) mocked_responses.add( responses.POST, baracoda_url, json=baracoda_mock_responses[centre_prefix], status=baracoda_mock_status, ) yield @pytest.fixture def cherrytrack_mock_source_plates_status(): return", "@pytest.fixture def clear_events(app): try: yield finally: with app.app_context(): events_collection = app.data.driver.db.events events_collection.delete_many({}) @pytest.fixture", "inserted_samples # clear up after the fixture is used finally: samples_collection.delete_many({}) @pytest.fixture def", "clear up after the fixture is used finally: samples_collection.delete_many({}) @pytest.fixture def mlwh_samples_in_cherrytrack(app, source_barcode,", "the fixture is used finally: samples_collection.delete_many({}) @pytest.fixture def mlwh_samples_in_cherrytrack(app, source_barcode, mlwh_sql_engine): def delete_data():", "it wants yield copy.deepcopy(SAMPLES), inserted_samples # clear up after the fixture is used", "used finally: samples_collection.delete_many({}) @pytest.fixture def mlwh_samples_in_cherrytrack(app, source_barcode, mlwh_sql_engine): def delete_data(): 
delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"])", "delete_data() example = cherrytrack_mlwh_example(source_barcode) # inserts insert_into_mlwh( app, example[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh(", "delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"study\"], mlwh_sql_engine,", "def message_source_complete(): message_content: EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_COMPLETED, \"occured_at\":", "app @pytest.fixture def client(app): return app.test_client() @pytest.fixture def biosero_auth_headers(app): with app.app_context(): return {\"Authorization\":", "mocked_responses, run_id, cherrytrack_run_info_response, cherrytrack_mock_run_info_status ): run_url = f\"{app.config['CHERRYTRACK_URL']}/automation-system-runs/{run_id}\" mocked_responses.add( responses.GET, run_url, json=cherrytrack_run_info_response, status=cherrytrack_mock_run_info_status,", "yield copy.deepcopy(samples), inserted_samples # clear up after the fixture is used finally: samples_collection.delete_many({})", "app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) yield finally:", "tests.fixtures.data.dart import DART_MONGO_MERGED_SAMPLES from tests.fixtures.data.event_wh import EVENT_WH_DATA from tests.fixtures.data.mlwh import ( COG_UK_IDS, MLWH_LH_SAMPLES,", "app.app_context(): events_collection = app.data.driver.db.events events_collection.delete_many({}) @pytest.fixture def priority_samples(app, samples): _, samples = samples", "@pytest.fixture def plates_lookup_without_samples(samples, 
priority_samples): return PLATES_LOOKUP_WITHOUT_SAMPLES @pytest.fixture def mocked_rabbit_channel(app): with app.app_context(): mocked_broker =", "from tests.fixtures.data.biosero.destination_plate_wells import build_cherrytrack_destination_plate_response from tests.fixtures.data.biosero.source_plate_wells import build_cherrytrack_source_plates_response from tests.fixtures.data.centres import CENTRES from", "build_cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode) @pytest.fixture def samples_from_cherrytrack_into_mongo(app, source_barcode): try: samples = rows_for_samples_in_cherrytrack(source_barcode) with app.app_context():", "app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() example = cherrytrack_mlwh_example(source_barcode) # inserts insert_into_mlwh( app, example[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"],", ") insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"study\"], mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"stock_resource\"], mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"], )", "load_sql_server_script(\"tests/data/dart/schema.sql\") @pytest.fixture def dart_samples(app, dart_schema_create): with app.app_context(): load_sql_server_script(\"tests/data/dart/seed.sql\") @pytest.fixture def dart_mongo_merged_samples(): return DART_MONGO_MERGED_SAMPLES", "create_dart_connection from lighthouse.helpers.mysql import create_mysql_connection_engine, get_table from lighthouse.messages.message import Message from lighthouse.types import", "connection.execute(subject_types_table.delete()) connection.execute(role_types_table.delete()) delete_event_warehouse_data() with event_wh_sql_engine.begin() as connection: print(\"Inserting Events Warehouse test data\") connection.execute(role_types_table.insert(),", "fixture is used with app.app_context(): centres_collection.delete_many({}) @pytest.fixture def 
samples(app): with app.app_context(): samples_collection =", "yield a copy of so that the test change it however it wants", "{ \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": \"no_callbacks\", \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [], \"metadata\": {},", "} return Message(message_content) @pytest.fixture def plates_lookup_with_samples(samples, priority_samples): return PLATES_LOOKUP_WITH_SAMPLES @pytest.fixture def plates_lookup_without_samples(samples, priority_samples):", "not None: num_samples = len(baracoda_mock_responses[centre_prefix][\"barcodes_group\"][\"barcodes\"]) baracoda_url = ( f\"http://{app.config['BARACODA_URL']}\" f\"/barcodes_group/{centre_prefix}/new?count={num_samples}\" ) mocked_responses.add( responses.POST,", "the number # of priority samples therefore PRIORITY_SAMPLES needs to be <= SAMPLES", "\"subject_type\": \"sample\", \"friendly_name\": \"friendly_name\", \"uuid\": \"00000000-1111-2222-3333-555555555555\", }, { \"role_type\": \"cherrypicking_source_labware\", \"subject_type\": \"plate\", \"friendly_name\":", "plates_lookup_with_samples(samples, priority_samples): return PLATES_LOOKUP_WITH_SAMPLES @pytest.fixture def plates_lookup_without_samples(samples, priority_samples): return PLATES_LOOKUP_WITHOUT_SAMPLES @pytest.fixture def mocked_rabbit_channel(app):", "\"biosero\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME: \"CPA\", } } @pytest.fixture def cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id): return build_cherrytrack_destination_plate_response(destination_barcode, source_barcode,", "# of priority samples therefore PRIORITY_SAMPLES needs to be <= SAMPLES for count,", "app, mocked_responses, destination_barcode, cherrytrack_destination_plate_response, cherrytrack_mock_destination_plate_status, ): destination_plate_url = f\"{app.config['CHERRYTRACK_URL']}/destination-plates/{destination_barcode}\" mocked_responses.add( responses.GET, 
destination_plate_url, json=cherrytrack_destination_plate_response,", "app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"] + MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], # type: ignore", "tests.fixtures.data.centres import CENTRES from tests.fixtures.data.dart import DART_MONGO_MERGED_SAMPLES from tests.fixtures.data.event_wh import EVENT_WH_DATA from tests.fixtures.data.mlwh", "def clear_events(app): try: yield finally: with app.app_context(): events_collection = app.data.driver.db.events events_collection.delete_many({}) @pytest.fixture def", "test change it however it wants yield copy.deepcopy(samples), inserted_samples # clear up after", "with app.app_context(): return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"biosero_read_write\")} @pytest.fixture def lighthouse_ui_auth_headers(app): with app.app_context(): return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"lighthouse_ui_read_write\")}", "test data\") connection.execute(table.insert(), data) def delete_from_mlwh(app, mlwh_sql_engine, table_name): table = get_table(mlwh_sql_engine, table_name) with", "copy.deepcopy(PLATE_EVENTS), inserted_events # clear up after the fixture is used with app.app_context(): events_collection.delete_many({})", "it wants yield copy.deepcopy(CENTRES) # clear up after the fixture is used with", "insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) yield", "# clear up after the fixture is used with app.app_context(): events_collection.delete_many({}) @pytest.fixture def", "cherrytrack_source_plates_response, cherrytrack_mock_source_plates_status, ): source_plates_url = 
f\"{app.config['CHERRYTRACK_URL']}/source-plates/{source_barcode}\" mocked_responses.add( responses.GET, source_plates_url, json=cherrytrack_source_plates_response, status=cherrytrack_mock_source_plates_status, ) yield", "events_collection = app.data.driver.db.events events_collection.delete_many({}) @pytest.fixture def priority_samples(app, samples): _, samples = samples #", "creating an app os.environ[\"EVE_SETTINGS\"] = \"test.py\" app = create_app() yield app @pytest.fixture def", "inserted_samples # clear up after the fixture is used with app.app_context(): samples_collection.delete_many({}) @pytest.fixture", "table = get_table(mlwh_sql_engine, table_name) with mlwh_sql_engine.begin() as connection: connection.execute(table.delete()) # delete all rows", "used with app.app_context(): samples_collection.delete_many({}) @pytest.fixture def clear_events(app): try: yield finally: with app.app_context(): events_collection", "create_app() yield app @pytest.fixture def client(app): return app.test_client() @pytest.fixture def biosero_auth_headers(app): with app.app_context():", "mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) try: delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"], mlwh_sql_engine,", "is used with app.app_context(): centres_collection.delete_many({}) @pytest.fixture def samples(app): with app.app_context(): samples_collection = app.data.driver.db.samples", "samples): _, samples = samples # create a copy so that the test", "delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try:", "wants yield copy.deepcopy(SAMPLES), inserted_samples # clear up after the fixture is used with", 
"@pytest.fixture def mocked_rabbit_channel(app): with app.app_context(): mocked_broker = MagicMock() with patch(\"lighthouse.classes.services.warehouse.Broker\", return_value=mocked_broker): mocked_channel =", "MLWH_LH_SAMPLES_MULTIPLE, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE, MLWH_SAMPLE_STOCK_RESOURCE, SAMPLES_FOR_MLWH_UPDATE, cherrytrack_mlwh_example, ) from tests.fixtures.data.plate_events import PLATE_EVENTS from tests.fixtures.data.plates_lookup import", "Warehouse test data\") connection.execute(role_types_table.insert(), EVENT_WH_DATA[\"role_types\"]) connection.execute(event_types_table.insert(), EVENT_WH_DATA[\"event_types\"]) connection.execute(subject_types_table.insert(), EVENT_WH_DATA[\"subject_types\"]) connection.execute(subjects_table.insert(), EVENT_WH_DATA[\"subjects\"]) connection.execute(events_table.insert(), EVENT_WH_DATA[\"events\"])", "baracoda_mock_responses.keys(): if baracoda_mock_responses[centre_prefix] is not None: num_samples = len(baracoda_mock_responses[centre_prefix][\"barcodes_group\"][\"barcodes\"]) baracoda_url = ( f\"http://{app.config['BARACODA_URL']}\"", "plate_events(app): with app.app_context(): events_collection = app.data.driver.db.events inserted_events = events_collection.insert_many(PLATE_EVENTS) # yield a copy", "it however it needs priority_samples = copy.deepcopy(PRIORITY_SAMPLES) # update the priority samples with", "cherrytrack_mock_destination_plate_status, ): destination_plate_url = f\"{app.config['CHERRYTRACK_URL']}/destination-plates/{destination_barcode}\" mocked_responses.add( responses.GET, destination_plate_url, json=cherrytrack_destination_plate_response, status=cherrytrack_mock_destination_plate_status, ) yield @pytest.fixture", "): destination_plate_url = f\"{app.config['CHERRYTRACK_URL']}/destination-plates/{destination_barcode}\" mocked_responses.add( responses.GET, destination_plate_url, json=cherrytrack_destination_plate_response, status=cherrytrack_mock_destination_plate_status, ) yield @pytest.fixture 
def", "# type: ignore mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"study\"], mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"], ) insert_into_mlwh(", ") yield @pytest.fixture def baracoda_mock_barcodes_group(app, mocked_responses, baracoda_mock_responses, baracoda_mock_status): for centre_prefix in baracoda_mock_responses.keys(): if", "\"metadata\": {}, }, \"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture def plates_lookup_with_samples(samples, priority_samples): return", "up after the fixture is used with app.app_context(): samples_collection.delete_many({}) @pytest.fixture def clear_events(app): try:", "event_wh_sql_engine.begin() as connection: connection.execute(roles_table.delete()) connection.execute(subjects_table.delete()) connection.execute(events_table.delete()) connection.execute(event_types_table.delete()) connection.execute(subject_types_table.delete()) connection.execute(role_types_table.delete()) delete_event_warehouse_data() with event_wh_sql_engine.begin() as", "update the priority samples with the _id of the samples inserted into mongo,", "mocked_rabbit_channel(app): with app.app_context(): mocked_broker = MagicMock() with patch(\"lighthouse.classes.services.warehouse.Broker\", return_value=mocked_broker): mocked_channel = MagicMock() mocked_broker.__enter__.return_value", "delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], )", "fixture is used with app.app_context(): events_collection.delete_many({}) @pytest.fixture def mocked_responses(): \"\"\"Easily mock responses from", "try: samples = rows_for_samples_in_cherrytrack(source_barcode) with app.app_context(): samples_collection = app.data.driver.db.samples inserted_samples = samples_collection.insert_many(samples) 
#", "the test change it however it wants yield copy.deepcopy(SOURCE_PLATES) # clear up after", "app.data.driver.db.samples inserted_samples = samples_collection.insert_many(SAMPLES) # yield a copy of so that the test", "cherrytrack_mock_destination_plate_status(): return HTTPStatus.OK @pytest.fixture def baracoda_mock_status(): return HTTPStatus.CREATED @pytest.fixture def cherrytrack_mock_source_plates( app, mocked_responses,", "def plates_lookup_with_samples(samples, priority_samples): return PLATES_LOOKUP_WITH_SAMPLES @pytest.fixture def plates_lookup_without_samples(samples, priority_samples): return PLATES_LOOKUP_WITHOUT_SAMPLES @pytest.fixture def", "first print(\"Inserting MLWH test data\") connection.execute(table.insert(), data) def delete_from_mlwh(app, mlwh_sql_engine, table_name): table =", "app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app,", "only uses the number # of priority samples therefore PRIORITY_SAMPLES needs to be", "cog_uk_ids(): return COG_UK_IDS # ********************** WAREHOUSE DATA ************************** # @pytest.fixture def mlwh_lh_samples(app, mlwh_sql_engine):", "change it however it needs priority_samples = copy.deepcopy(PRIORITY_SAMPLES) # update the priority samples", "\"user_identifier\": \"test1\", \"subjects\": [ { \"role_type\": \"sample\", \"subject_type\": \"sample\", \"friendly_name\": \"friendly_name\", \"uuid\": \"00000000-1111-2222-3333-555555555555\",", "the testing environment when creating an app os.environ[\"EVE_SETTINGS\"] = \"test.py\" app = create_app()", "\"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture def message_source_all_negative(): message_content: EventMessage = { \"event\":", "tests.fixtures.data.samples import SAMPLES, rows_for_samples_in_cherrytrack from tests.fixtures.data.source_plates 
import SOURCE_PLATES @pytest.fixture def app(): # set", "def samples(app): with app.app_context(): samples_collection = app.data.driver.db.samples inserted_samples = samples_collection.insert_many(SAMPLES) # yield a", "lighthouse.messages.message import Message from lighthouse.types import EventMessage from tests.fixtures.data.biosero.destination_plate_wells import build_cherrytrack_destination_plate_response from tests.fixtures.data.biosero.source_plate_wells", "import create_app from lighthouse.constants.events import PE_BECKMAN_SOURCE_ALL_NEGATIVES, PE_BECKMAN_SOURCE_COMPLETED from lighthouse.constants.fields import ( FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER, FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME,", "example[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, example[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) yield finally: delete_data()", "from tests.fixtures.data.centres import CENTRES from tests.fixtures.data.dart import DART_MONGO_MERGED_SAMPLES from tests.fixtures.data.event_wh import EVENT_WH_DATA from", "= len(baracoda_mock_responses[centre_prefix][\"barcodes_group\"][\"barcodes\"]) baracoda_url = ( f\"http://{app.config['BARACODA_URL']}\" f\"/barcodes_group/{centre_prefix}/new?count={num_samples}\" ) mocked_responses.add( responses.POST, baracoda_url, json=baracoda_mock_responses[centre_prefix], status=baracoda_mock_status,", ") yield finally: delete_data() @pytest.fixture def mlwh_sentinel_and_beckman_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"])", "into mongo, currently only uses the number # of priority samples therefore PRIORITY_SAMPLES", "} return Message(message_content) @pytest.fixture def message_source_complete(): message_content: EventMessage = { \"event\": { \"uuid\":", "mlwh_sentinel_and_beckman_cherrypicked(app, mlwh_sql_engine): def delete_data(): 
delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"])", "rows_for_samples_in_cherrytrack from tests.fixtures.data.source_plates import SOURCE_PLATES @pytest.fixture def app(): # set the 'EVE_SETTINGS' env", "import PRIORITY_SAMPLES from tests.fixtures.data.samples import SAMPLES, rows_for_samples_in_cherrytrack from tests.fixtures.data.source_plates import SOURCE_PLATES @pytest.fixture def", "dart_mongo_merged_samples(): return DART_MONGO_MERGED_SAMPLES @pytest.fixture def event_wh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"EVENTS_WH_DB\"]) @pytest.fixture def message_unknown(): message_content:", "all rows from table first print(\"Inserting MLWH test data\") connection.execute(table.insert(), data) def delete_from_mlwh(app,", "therefore PRIORITY_SAMPLES needs to be <= SAMPLES for count, priority_sample in enumerate(priority_samples): priority_sample[FIELD_SAMPLE_ID]", "app.app_context(): events_collection.delete_many({}) @pytest.fixture def mocked_responses(): \"\"\"Easily mock responses from HTTP calls. 
https://github.com/getsentry/responses#responses-as-a-pytest-fixture\"\"\" with", "return HTTPStatus.OK @pytest.fixture def baracoda_mock_status(): return HTTPStatus.CREATED @pytest.fixture def cherrytrack_mock_source_plates( app, mocked_responses, source_barcode,", "lighthouse.constants.events import PE_BECKMAN_SOURCE_ALL_NEGATIVES, PE_BECKMAN_SOURCE_COMPLETED from lighthouse.constants.fields import ( FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER, FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME, FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER, FIELD_CHERRYTRACK_USER_ID, FIELD_SAMPLE_ID,", ") yield @pytest.fixture def cherrytrack_mock_source_plates_status(): return HTTPStatus.OK @pytest.fixture def cherrytrack_mock_run_info_status(): return HTTPStatus.OK @pytest.fixture", "def cherrytrack_mock_destination_plate( app, mocked_responses, destination_barcode, cherrytrack_destination_plate_response, cherrytrack_mock_destination_plate_status, ): destination_plate_url = f\"{app.config['CHERRYTRACK_URL']}/destination-plates/{destination_barcode}\" mocked_responses.add( responses.GET,", "message_source_complete(): message_content: EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_COMPLETED, \"occured_at\": \"2020-11-26T15:58:20\",", "CENTRES from tests.fixtures.data.dart import DART_MONGO_MERGED_SAMPLES from tests.fixtures.data.event_wh import EVENT_WH_DATA from tests.fixtures.data.mlwh import (", "MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) yield finally: delete_data()", "it wants yield copy.deepcopy(SOURCE_PLATES) # clear up after the fixture is used with", "\"event_type\": PE_BECKMAN_SOURCE_COMPLETED, \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [ { \"role_type\": \"sample\", 
\"subject_type\": \"sample\",", "app.app_context(): centres_collection.delete_many({}) @pytest.fixture def samples(app): with app.app_context(): samples_collection = app.data.driver.db.samples inserted_samples = samples_collection.insert_many(SAMPLES)", "needs to be <= SAMPLES for count, priority_sample in enumerate(priority_samples): priority_sample[FIELD_SAMPLE_ID] = samples.inserted_ids[count]", "mocked_responses, destination_barcode, cherrytrack_destination_plate_response, cherrytrack_mock_destination_plate_status, ): destination_plate_url = f\"{app.config['CHERRYTRACK_URL']}/destination-plates/{destination_barcode}\" mocked_responses.add( responses.GET, destination_plate_url, json=cherrytrack_destination_plate_response, status=cherrytrack_mock_destination_plate_status,", "delete all rows from table first print(\"Inserting MLWH test data\") connection.execute(table.insert(), data) def", "PRIORITY_SAMPLES from tests.fixtures.data.samples import SAMPLES, rows_for_samples_in_cherrytrack from tests.fixtures.data.source_plates import SOURCE_PLATES @pytest.fixture def app():", "connection.execute(role_types_table.insert(), EVENT_WH_DATA[\"role_types\"]) connection.execute(event_types_table.insert(), EVENT_WH_DATA[\"event_types\"]) connection.execute(subject_types_table.insert(), EVENT_WH_DATA[\"subject_types\"]) connection.execute(subjects_table.insert(), EVENT_WH_DATA[\"subjects\"]) connection.execute(events_table.insert(), EVENT_WH_DATA[\"events\"]) connection.execute(roles_table.insert(), EVENT_WH_DATA[\"roles\"]) yield", "MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"study\"], mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"stock_resource\"],", "= app.data.driver.db.source_plates _ = source_plates_collection.insert_many(SOURCE_PLATES) # yield a copy of that the test", "samples(app): with app.app_context(): 
samples_collection = app.data.driver.db.samples inserted_samples = samples_collection.insert_many(SAMPLES) # yield a copy", "mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() example = cherrytrack_mlwh_example(source_barcode) # inserts insert_into_mlwh( app, example[\"lighthouse_sample\"], mlwh_sql_engine,", "# create a copy so that the test can change it however it", "mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) yield finally: delete_data() @pytest.fixture def mlwh_sentinel_and_beckman_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app,", "copy of so that the test change it however it wants yield copy.deepcopy(samples),", "return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"EVENTS_WH_DB\"]) @pytest.fixture def message_unknown(): message_content: EventMessage = { \"event\": { \"uuid\":", "return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"MLWH_DB\"]) @pytest.fixture def dart_connection(app): return create_dart_connection() @pytest.fixture def dart_schema_create(app): with app.app_context():", "\"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture def message_source_complete(): message_content: EventMessage = { \"event\":", "\"uuid\": \"00000000-1111-2222-3333-555555555555\", }, { \"role_type\": \"cherrypicking_source_labware\", \"subject_type\": \"plate\", \"friendly_name\": \"plate-barcode\", \"uuid\": \"00000000-1111-2222-3333-555555555556\", },", "wants yield copy.deepcopy(SOURCE_PLATES) # clear up after the fixture is used with app.app_context():", "app, MLWH_SAMPLE_STOCK_RESOURCE[\"stock_resource\"], mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"], ) yield finally: delete_data() @pytest.fixture def mlwh_beckman_cherrypicked(app, mlwh_sql_engine): def", "import load_sql_server_script from lighthouse.helpers.dart import create_dart_connection from lighthouse.helpers.mysql import 
create_mysql_connection_engine, get_table from lighthouse.messages.message", "import Message from lighthouse.types import EventMessage from tests.fixtures.data.biosero.destination_plate_wells import build_cherrytrack_destination_plate_response from tests.fixtures.data.biosero.source_plate_wells import", "\"plate\", \"friendly_name\": \"plate-barcode\", \"uuid\": \"00000000-1111-2222-3333-555555555556\", }, { \"role_type\": \"robot\", \"subject_type\": \"robot\", \"friendly_name\": \"robot-serial\",", "num_samples = len(baracoda_mock_responses[centre_prefix][\"barcodes_group\"][\"barcodes\"]) baracoda_url = ( f\"http://{app.config['BARACODA_URL']}\" f\"/barcodes_group/{centre_prefix}/new?count={num_samples}\" ) mocked_responses.add( responses.POST, baracoda_url, json=baracoda_mock_responses[centre_prefix],", "from lighthouse.helpers.dart import create_dart_connection from lighthouse.helpers.mysql import create_mysql_connection_engine, get_table from lighthouse.messages.message import Message", "with app.app_context(): source_plates_collection = app.data.driver.db.source_plates _ = source_plates_collection.insert_many(SOURCE_PLATES) # yield a copy of", "try: delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"],", "it however it wants yield copy.deepcopy(PLATE_EVENTS), inserted_events # clear up after the fixture", "# inserts insert_into_mlwh( app, example[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, example[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"],", "an app os.environ[\"EVE_SETTINGS\"] = \"test.py\" app = create_app() yield app @pytest.fixture def client(app):", "return HTTPStatus.OK @pytest.fixture def cherrytrack_mock_destination_plate_status(): return HTTPStatus.OK @pytest.fixture def 
baracoda_mock_status(): return HTTPStatus.CREATED @pytest.fixture", "delete_data() @pytest.fixture def mlwh_sentinel_and_beckman_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"])", "connection.execute(events_table.delete()) connection.execute(event_types_table.delete()) connection.execute(subject_types_table.delete()) connection.execute(role_types_table.delete()) delete_event_warehouse_data() with event_wh_sql_engine.begin() as connection: print(\"Inserting Events Warehouse test", "mocked_responses.add( responses.GET, source_plates_url, json=cherrytrack_source_plates_response, status=cherrytrack_mock_source_plates_status, ) yield @pytest.fixture def cherrytrack_mock_destination_plate( app, mocked_responses, destination_barcode,", "samples_collection.delete_many({}) @pytest.fixture def clear_events(app): try: yield finally: with app.app_context(): events_collection = app.data.driver.db.events events_collection.delete_many({})", "= [ { \"barcode\": \"plate_123\", \"location_barcode\": \"location_123\", } ] mocked_responses.add(responses.POST, labwhere_url, json=body, status=HTTPStatus.OK)", "@pytest.fixture def event_wh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"EVENTS_WH_DB\"]) @pytest.fixture def message_unknown(): message_content: EventMessage = {", "finally: with app.app_context(): events_collection = app.data.driver.db.events events_collection.delete_many({}) @pytest.fixture def priority_samples(app, samples): _, samples", "create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"MLWH_DB\"]) @pytest.fixture def dart_connection(app): return create_dart_connection() @pytest.fixture def dart_schema_create(app): with app.app_context(): load_sql_server_script(\"tests/data/dart/schema.sql\")", "yield 
@pytest.fixture def cherrytrack_mock_destination_plate( app, mocked_responses, destination_barcode, cherrytrack_destination_plate_response, cherrytrack_mock_destination_plate_status, ): destination_plate_url = f\"{app.config['CHERRYTRACK_URL']}/destination-plates/{destination_barcode}\"", "\"role_type\": \"robot\", \"subject_type\": \"robot\", \"friendly_name\": \"robot-serial\", \"uuid\": \"00000000-1111-2222-3333-555555555557\", }, ], \"metadata\": {}, },", "connection.execute(role_types_table.delete()) delete_event_warehouse_data() with event_wh_sql_engine.begin() as connection: print(\"Inserting Events Warehouse test data\") connection.execute(role_types_table.insert(), EVENT_WH_DATA[\"role_types\"])", "def mocked_rabbit_channel(app): with app.app_context(): mocked_broker = MagicMock() with patch(\"lighthouse.classes.services.warehouse.Broker\", return_value=mocked_broker): mocked_channel = MagicMock()", "\"sample\", \"friendly_name\": \"friendly_name\", \"uuid\": \"00000000-1111-2222-3333-555555555555\", }, { \"role_type\": \"cherrypicking_source_labware\", \"subject_type\": \"plate\", \"friendly_name\": \"plate-barcode\",", "app.app_context(): return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"lighthouse_ui_read_write\")} @pytest.fixture def centres(app): with app.app_context(): centres_collection = app.data.driver.db.centres _", "tests.fixtures.data.plates_lookup import PLATES_LOOKUP_WITH_SAMPLES, PLATES_LOOKUP_WITHOUT_SAMPLES from tests.fixtures.data.priority_samples import PRIORITY_SAMPLES from tests.fixtures.data.samples import SAMPLES, rows_for_samples_in_cherrytrack", "'EVE_SETTINGS' env variable to easily switch to the testing environment when creating an", "samples_from_cherrytrack_into_mongo(app, source_barcode): try: samples = rows_for_samples_in_cherrytrack(source_barcode) with app.app_context(): samples_collection = app.data.driver.db.samples inserted_samples =", "os.environ[\"EVE_SETTINGS\"] = \"test.py\" app = create_app() 
yield app @pytest.fixture def client(app): return app.test_client()", "delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"] +", ") mocked_responses.add( responses.POST, baracoda_url, json=baracoda_mock_responses[centre_prefix], status=baracoda_mock_status, ) yield @pytest.fixture def cherrytrack_mock_source_plates_status(): return HTTPStatus.OK", "= samples_collection.insert_many(samples) # yield a copy of so that the test change it", "is used finally: samples_collection.delete_many({}) @pytest.fixture def mlwh_samples_in_cherrytrack(app, source_barcode, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine,", "Message(message_content) @pytest.fixture def message_source_all_negative(): message_content: EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\":", "lighthouse.db.dart import load_sql_server_script from lighthouse.helpers.dart import create_dart_connection from lighthouse.helpers.mysql import create_mysql_connection_engine, get_table from", "destination_barcode, cherrytrack_destination_plate_response, cherrytrack_mock_destination_plate_status, ): destination_plate_url = f\"{app.config['CHERRYTRACK_URL']}/destination-plates/{destination_barcode}\" mocked_responses.add( responses.GET, destination_plate_url, json=cherrytrack_destination_plate_response, status=cherrytrack_mock_destination_plate_status, )", "mocked_channel @pytest.fixture def cherrytrack_mock_run_info( app, mocked_responses, run_id, cherrytrack_run_info_response, cherrytrack_mock_run_info_status ): run_url = f\"{app.config['CHERRYTRACK_URL']}/automation-system-runs/{run_id}\"", "events_collection = app.data.driver.db.events inserted_events = events_collection.insert_many(PLATE_EVENTS) # yield a copy of so that", "app.data.driver.db.events inserted_events = 
events_collection.insert_many(PLATE_EVENTS) # yield a copy of so that the test", "insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"study\"], mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"stock_resource\"], mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"], ) yield", "\"subjects\": [ { \"role_type\": \"cherrypicking_source_labware\", \"subject_type\": \"plate\", \"friendly_name\": \"plate-barcode\", \"uuid\": \"00000000-1111-2222-3333-555555555556\", }, {", "of that the test change it however it wants yield copy.deepcopy(SOURCE_PLATES) # clear", "print(\"Inserting Events Warehouse test data\") connection.execute(role_types_table.insert(), EVENT_WH_DATA[\"role_types\"]) connection.execute(event_types_table.insert(), EVENT_WH_DATA[\"event_types\"]) connection.execute(subject_types_table.insert(), EVENT_WH_DATA[\"subject_types\"]) connection.execute(subjects_table.insert(), EVENT_WH_DATA[\"subjects\"])", "MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], # type: ignore mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"study\"], mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"], )", "build_cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id) def cherrytrack_destination_plate_response_duplicated_wells(cherrytrack_destination_plate_response): cherrytrack_destination_plate_response[\"wells\"][0][\"destination_coordinate\"] = \"H12\" return cherrytrack_destination_plate_response @pytest.fixture def cherrytrack_source_plates_response(run_id,", "PE_BECKMAN_SOURCE_ALL_NEGATIVES, \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [ { \"role_type\": \"cherrypicking_source_labware\", \"subject_type\": \"plate\", \"friendly_name\":", "cherrytrack_destination_plate_response_duplicated_wells(cherrytrack_destination_plate_response): 
cherrytrack_destination_plate_response[\"wells\"][0][\"destination_coordinate\"] = \"H12\" return cherrytrack_destination_plate_response @pytest.fixture def cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode): return build_cherrytrack_source_plates_response(run_id,", "@pytest.fixture def mlwh_sentinel_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app,", "connection.execute(roles_table.delete()) connection.execute(subjects_table.delete()) connection.execute(events_table.delete()) connection.execute(event_types_table.delete()) connection.execute(subject_types_table.delete()) connection.execute(role_types_table.delete()) delete_event_warehouse_data() with event_wh_sql_engine.begin() as connection: print(\"Inserting Events", "( COG_UK_IDS, MLWH_LH_SAMPLES, MLWH_LH_SAMPLES_MULTIPLE, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE, MLWH_SAMPLE_STOCK_RESOURCE, SAMPLES_FOR_MLWH_UPDATE, cherrytrack_mlwh_example, ) from tests.fixtures.data.plate_events import PLATE_EVENTS", "variable to easily switch to the testing environment when creating an app os.environ[\"EVE_SETTINGS\"]", "get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLE_TYPES_TABLE\"]) def delete_event_warehouse_data(): with event_wh_sql_engine.begin() as connection: connection.execute(roles_table.delete()) connection.execute(subjects_table.delete()) connection.execute(events_table.delete()) connection.execute(event_types_table.delete()) connection.execute(subject_types_table.delete())", "up after the fixture is used with app.app_context(): centres_collection.delete_many({}) @pytest.fixture def samples(app): with", "mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES_MULTIPLE, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def mlwh_sentinel_cherrypicked(app, mlwh_sql_engine): def 
delete_data(): delete_from_mlwh(app, mlwh_sql_engine,", "HTTPStatus.CREATED @pytest.fixture def cherrytrack_mock_source_plates( app, mocked_responses, source_barcode, destination_barcode, cherrytrack_source_plates_response, cherrytrack_mock_source_plates_status, ): source_plates_url =", "cherrytrack_mock_source_plates( app, mocked_responses, source_barcode, destination_barcode, cherrytrack_source_plates_response, cherrytrack_mock_source_plates_status, ): source_plates_url = f\"{app.config['CHERRYTRACK_URL']}/source-plates/{source_barcode}\" mocked_responses.add( responses.GET,", "PE_BECKMAN_SOURCE_COMPLETED, \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [ { \"role_type\": \"sample\", \"subject_type\": \"sample\", \"friendly_name\":", "def mlwh_lh_samples_multiple(app, mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES_MULTIPLE, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def mlwh_sentinel_cherrypicked(app, mlwh_sql_engine): def delete_data():", "that the test change it however it wants yield copy.deepcopy(samples), inserted_samples # clear", "def mlwh_lh_samples(app, mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def mlwh_lh_samples_multiple(app, mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES_MULTIPLE,", "mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"] + MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], # type: ignore mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"],", "app.config[\"MLWH_SAMPLE_TABLE\"], ) yield finally: delete_data() @pytest.fixture def mlwh_sentinel_and_beckman_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine,", "source_barcode, destination_barcode): return build_cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode) 
@pytest.fixture def samples_from_cherrytrack_into_mongo(app, source_barcode): try: samples =", "cherrytrack_mock_run_info_status ): run_url = f\"{app.config['CHERRYTRACK_URL']}/automation-system-runs/{run_id}\" mocked_responses.add( responses.GET, run_url, json=cherrytrack_run_info_response, status=cherrytrack_mock_run_info_status, ) yield @pytest.fixture", "\"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [ { \"role_type\": \"cherrypicking_source_labware\", \"subject_type\": \"plate\", \"friendly_name\": \"plate-barcode\",", "copy.deepcopy(CENTRES) # clear up after the fixture is used with app.app_context(): centres_collection.delete_many({}) @pytest.fixture", "app.config.get(\"API_TOKENS_EVENTS\").get(\"biosero_read_write\")} @pytest.fixture def lighthouse_ui_auth_headers(app): with app.app_context(): return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"lighthouse_ui_read_write\")} @pytest.fixture def centres(app): with", "yield finally: delete_data() @pytest.fixture def mlwh_sentinel_and_beckman_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app,", "get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLES_TABLE\"]) events_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENTS_TABLE\"]) event_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENT_TYPES_TABLE\"]) subject_types_table = get_table(event_wh_sql_engine,", "clear_events(app): try: yield finally: with app.app_context(): events_collection = app.data.driver.db.events events_collection.delete_many({}) @pytest.fixture def priority_samples(app,", "app.config[\"MLWH_STUDY_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine, 
app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"],", "is not None: num_samples = len(baracoda_mock_responses[centre_prefix][\"barcodes_group\"][\"barcodes\"]) baracoda_url = ( f\"http://{app.config['BARACODA_URL']}\" f\"/barcodes_group/{centre_prefix}/new?count={num_samples}\" ) mocked_responses.add(", "] mocked_responses.add(responses.POST, labwhere_url, json=body, status=HTTPStatus.OK) @pytest.fixture def samples_for_mlwh_update(): return SAMPLES_FOR_MLWH_UPDATE @pytest.fixture def cog_uk_ids():", "yield a copy of that the test change it however it wants yield", "import PLATES_LOOKUP_WITH_SAMPLES, PLATES_LOOKUP_WITHOUT_SAMPLES from tests.fixtures.data.priority_samples import PRIORITY_SAMPLES from tests.fixtures.data.samples import SAMPLES, rows_for_samples_in_cherrytrack from", "baracoda_mock_barcodes_group(app, mocked_responses, baracoda_mock_responses, baracoda_mock_status): for centre_prefix in baracoda_mock_responses.keys(): if baracoda_mock_responses[centre_prefix] is not None:", "lighthouse.constants.fields import ( FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER, FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME, FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER, FIELD_CHERRYTRACK_USER_ID, FIELD_SAMPLE_ID, ) from lighthouse.db.dart import load_sql_server_script", "yield rsps @pytest.fixture def labwhere_samples_simple(app, mocked_responses): labwhere_url = f\"{app.config['LABWHERE_URL']}/api/labwares_by_barcode\" body = [ {", "message_content: EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_ALL_NEGATIVES, \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\":", "# clear up after the fixture is used with app.app_context(): centres_collection.delete_many({}) @pytest.fixture def", "yield copy.deepcopy(CENTRES) # clear up after the fixture is used with app.app_context(): centres_collection.delete_many({})", "connection.execute(subjects_table.insert(), EVENT_WH_DATA[\"subjects\"]) 
connection.execute(events_table.insert(), EVENT_WH_DATA[\"events\"]) connection.execute(roles_table.insert(), EVENT_WH_DATA[\"roles\"]) yield finally: delete_event_warehouse_data() @pytest.fixture def mlwh_sql_engine(app): return", "as rsps: yield rsps @pytest.fixture def labwhere_samples_simple(app, mocked_responses): labwhere_url = f\"{app.config['LABWHERE_URL']}/api/labwares_by_barcode\" body =", "= get_table(mlwh_sql_engine, table_name) with mlwh_sql_engine.begin() as connection: connection.execute(table.delete()) # delete all rows from", "@pytest.fixture def baracoda_mock_barcodes_group(app, mocked_responses, baracoda_mock_responses, baracoda_mock_status): for centre_prefix in baracoda_mock_responses.keys(): if baracoda_mock_responses[centre_prefix] is", "\"cherrypicking_source_labware\", \"subject_type\": \"plate\", \"friendly_name\": \"plate-barcode\", \"uuid\": \"00000000-1111-2222-3333-555555555556\", }, { \"role_type\": \"robot\", \"subject_type\": \"robot\",", "# ********************** WAREHOUSE DATA ************************** # @pytest.fixture def mlwh_lh_samples(app, mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES, mlwh_sql_engine,", "= ( f\"http://{app.config['BARACODA_URL']}\" f\"/barcodes_group/{centre_prefix}/new?count={num_samples}\" ) mocked_responses.add( responses.POST, baracoda_url, json=baracoda_mock_responses[centre_prefix], status=baracoda_mock_status, ) yield @pytest.fixture", "for count, priority_sample in enumerate(priority_samples): priority_sample[FIELD_SAMPLE_ID] = samples.inserted_ids[count] with app.app_context(): priority_samples_collection = app.data.driver.db.priority_samples", "= app.data.driver.db.events events_collection.delete_many({}) @pytest.fixture def priority_samples(app, samples): _, samples = samples # create", "responses.RequestsMock() as rsps: yield rsps @pytest.fixture def labwhere_samples_simple(app, mocked_responses): labwhere_url = f\"{app.config['LABWHERE_URL']}/api/labwares_by_barcode\" body", "copy of 
that the test change it however it wants yield copy.deepcopy(SOURCE_PLATES) #", "delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"],", "\"friendly_name\": \"plate-barcode\", \"uuid\": \"00000000-1111-2222-3333-555555555556\", }, { \"role_type\": \"robot\", \"subject_type\": \"robot\", \"friendly_name\": \"robot-serial\", \"uuid\":", "priority_samples): return PLATES_LOOKUP_WITHOUT_SAMPLES @pytest.fixture def mocked_rabbit_channel(app): with app.app_context(): mocked_broker = MagicMock() with patch(\"lighthouse.classes.services.warehouse.Broker\",", "def message_unknown(): message_content: EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": \"no_callbacks\", \"occured_at\":", "after the fixture is used with app.app_context(): samples_collection.delete_many({}) @pytest.fixture def clear_events(app): try: yield", "def cherrytrack_run_info_response(run_id): return { \"data\": { \"id\": run_id, FIELD_CHERRYTRACK_USER_ID: \"user1\", FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER: \"aLiquidHandlerSerialNumber\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER:", "def cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id): return build_cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id) def cherrytrack_destination_plate_response_duplicated_wells(cherrytrack_destination_plate_response): cherrytrack_destination_plate_response[\"wells\"][0][\"destination_coordinate\"] = \"H12\"", ") from lighthouse.db.dart import load_sql_server_script from lighthouse.helpers.dart import create_dart_connection from lighthouse.helpers.mysql import create_mysql_connection_engine,", "clear up after the fixture is used with app.app_context(): samples_collection.delete_many({}) 
@pytest.fixture def clear_events(app):", "app.app_context(): samples_collection.delete_many({}) @pytest.fixture def clear_events(app): try: yield finally: with app.app_context(): events_collection = app.data.driver.db.events", "from http import HTTPStatus from unittest.mock import MagicMock, patch import pytest import responses", "}, ], \"metadata\": {}, }, \"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture def message_source_all_negative():", "yield copy.deepcopy(SAMPLES), inserted_samples # clear up after the fixture is used with app.app_context():", "def cog_uk_ids(): return COG_UK_IDS # ********************** WAREHOUSE DATA ************************** # @pytest.fixture def mlwh_lh_samples(app,", "source_plates_collection.insert_many(SOURCE_PLATES) # yield a copy of that the test change it however it", "= f\"{app.config['CHERRYTRACK_URL']}/destination-plates/{destination_barcode}\" mocked_responses.add( responses.GET, destination_plate_url, json=cherrytrack_destination_plate_response, status=cherrytrack_mock_destination_plate_status, ) yield @pytest.fixture def cherrytrack_run_info_response(run_id): return", "@pytest.fixture def client(app): return app.test_client() @pytest.fixture def biosero_auth_headers(app): with app.app_context(): return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"biosero_read_write\")}", "= samples.inserted_ids[count] with app.app_context(): priority_samples_collection = app.data.driver.db.priority_samples _ = priority_samples_collection.insert_many(priority_samples) yield priority_samples #", "rsps: yield rsps @pytest.fixture def labwhere_samples_simple(app, mocked_responses): labwhere_url = f\"{app.config['LABWHERE_URL']}/api/labwares_by_barcode\" body = [", "FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER, FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME, FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER, FIELD_CHERRYTRACK_USER_ID, FIELD_SAMPLE_ID, ) from lighthouse.db.dart import load_sql_server_script 
from lighthouse.helpers.dart import", "# clear up after the fixture is used with app.app_context(): source_plates_collection.delete_many({}) @pytest.fixture def", "@pytest.fixture def mlwh_beckman_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try:", "centres(app): with app.app_context(): centres_collection = app.data.driver.db.centres _ = centres_collection.insert_many(CENTRES) # yield a copy", "change it however it wants yield copy.deepcopy(SOURCE_PLATES) # clear up after the fixture", "insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"study\"], mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"], ) insert_into_mlwh(", "as connection: print(\"Inserting Events Warehouse test data\") connection.execute(role_types_table.insert(), EVENT_WH_DATA[\"role_types\"]) connection.execute(event_types_table.insert(), EVENT_WH_DATA[\"event_types\"]) connection.execute(subject_types_table.insert(), EVENT_WH_DATA[\"subject_types\"])", "<= SAMPLES for count, priority_sample in enumerate(priority_samples): priority_sample[FIELD_SAMPLE_ID] = samples.inserted_ids[count] with app.app_context(): priority_samples_collection", "@pytest.fixture def event_wh_data(app, event_wh_sql_engine): try: subjects_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECTS_TABLE\"]) roles_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLES_TABLE\"])", "app.config[\"EVENT_WH_EVENT_TYPES_TABLE\"]) subject_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECT_TYPES_TABLE\"]) role_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLE_TYPES_TABLE\"]) def delete_event_warehouse_data(): with event_wh_sql_engine.begin()", "DART_MONGO_MERGED_SAMPLES from 
tests.fixtures.data.event_wh import EVENT_WH_DATA from tests.fixtures.data.mlwh import ( COG_UK_IDS, MLWH_LH_SAMPLES, MLWH_LH_SAMPLES_MULTIPLE, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE,", "\"robot\", \"friendly_name\": \"robot-serial\", \"uuid\": \"00000000-1111-2222-3333-555555555557\", }, ], \"metadata\": {}, }, \"lims\": \"LH_TEST\", }", "\"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": \"no_callbacks\", \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [], \"metadata\":", "https://github.com/getsentry/responses#responses-as-a-pytest-fixture\"\"\" with responses.RequestsMock() as rsps: yield rsps @pytest.fixture def labwhere_samples_simple(app, mocked_responses): labwhere_url =", "lighthouse.types import EventMessage from tests.fixtures.data.biosero.destination_plate_wells import build_cherrytrack_destination_plate_response from tests.fixtures.data.biosero.source_plate_wells import build_cherrytrack_source_plates_response from tests.fixtures.data.centres", "insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"] + MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], # type: ignore mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) insert_into_mlwh( app,", "mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) try:", "inserted into mongo, currently only uses the number # of priority samples therefore", "table_name): table = get_table(mlwh_sql_engine, table_name) with mlwh_sql_engine.begin() as connection: print(\"Deleting MLWH test data\")", "insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) yield finally: delete_data() @pytest.fixture def mlwh_sentinel_and_beckman_cherrypicked(app, mlwh_sql_engine):", 
"f\"{app.config['CHERRYTRACK_URL']}/destination-plates/{destination_barcode}\" mocked_responses.add( responses.GET, destination_plate_url, json=cherrytrack_destination_plate_response, status=cherrytrack_mock_destination_plate_status, ) yield @pytest.fixture def cherrytrack_run_info_response(run_id): return {", "samples_collection.insert_many(SAMPLES) # yield a copy of so that the test change it however", "from tests.fixtures.data.plate_events import PLATE_EVENTS from tests.fixtures.data.plates_lookup import PLATES_LOOKUP_WITH_SAMPLES, PLATES_LOOKUP_WITHOUT_SAMPLES from tests.fixtures.data.priority_samples import PRIORITY_SAMPLES", "cherrytrack_mock_destination_plate( app, mocked_responses, destination_barcode, cherrytrack_destination_plate_response, cherrytrack_mock_destination_plate_status, ): destination_plate_url = f\"{app.config['CHERRYTRACK_URL']}/destination-plates/{destination_barcode}\" mocked_responses.add( responses.GET, destination_plate_url,", "create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"], app.config[\"EVENTS_WH_DB\"]) @pytest.fixture def message_unknown(): message_content: EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\",", "pytest import responses from lighthouse import create_app from lighthouse.constants.events import PE_BECKMAN_SOURCE_ALL_NEGATIVES, PE_BECKMAN_SOURCE_COMPLETED from", "with app.app_context(): events_collection.delete_many({}) @pytest.fixture def mocked_responses(): \"\"\"Easily mock responses from HTTP calls. 
https://github.com/getsentry/responses#responses-as-a-pytest-fixture\"\"\"", "the 'EVE_SETTINGS' env variable to easily switch to the testing environment when creating", "f\"http://{app.config['BARACODA_URL']}\" f\"/barcodes_group/{centre_prefix}/new?count={num_samples}\" ) mocked_responses.add( responses.POST, baracoda_url, json=baracoda_mock_responses[centre_prefix], status=baracoda_mock_status, ) yield @pytest.fixture def cherrytrack_mock_source_plates_status():", "import PLATE_EVENTS from tests.fixtures.data.plates_lookup import PLATES_LOOKUP_WITH_SAMPLES, PLATES_LOOKUP_WITHOUT_SAMPLES from tests.fixtures.data.priority_samples import PRIORITY_SAMPLES from tests.fixtures.data.samples", "the fixture is used with app.app_context(): priority_samples_collection.delete_many({}) @pytest.fixture def source_plates(app): with app.app_context(): source_plates_collection", "{}, }, \"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture def message_source_complete(): message_content: EventMessage =", "\"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_ALL_NEGATIVES, \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [ { \"role_type\": \"cherrypicking_source_labware\", \"subject_type\":", "with app.app_context(): centres_collection.delete_many({}) @pytest.fixture def samples(app): with app.app_context(): samples_collection = app.data.driver.db.samples inserted_samples =", "[ { \"role_type\": \"cherrypicking_source_labware\", \"subject_type\": \"plate\", \"friendly_name\": \"plate-barcode\", \"uuid\": \"00000000-1111-2222-3333-555555555556\", }, { \"role_type\":", "fixture is used with app.app_context(): samples_collection.delete_many({}) @pytest.fixture def clear_events(app): try: yield finally: with", "def delete_from_mlwh(app, mlwh_sql_engine, table_name): table = get_table(mlwh_sql_engine, table_name) with mlwh_sql_engine.begin() as connection: print(\"Deleting", "it however it wants yield 
copy.deepcopy(SOURCE_PLATES) # clear up after the fixture is", "with app.app_context(): samples_collection.delete_many({}) @pytest.fixture def clear_events(app): try: yield finally: with app.app_context(): events_collection =", "source_barcode): try: samples = rows_for_samples_in_cherrytrack(source_barcode) with app.app_context(): samples_collection = app.data.driver.db.samples inserted_samples = samples_collection.insert_many(samples)", "\"user_identifier\": \"test1\", \"subjects\": [ { \"role_type\": \"cherrypicking_source_labware\", \"subject_type\": \"plate\", \"friendly_name\": \"plate-barcode\", \"uuid\": \"00000000-1111-2222-3333-555555555556\",", "lighthouse_ui_auth_headers(app): with app.app_context(): return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"lighthouse_ui_read_write\")} @pytest.fixture def centres(app): with app.app_context(): centres_collection =", "plates_lookup_without_samples(samples, priority_samples): return PLATES_LOOKUP_WITHOUT_SAMPLES @pytest.fixture def mocked_rabbit_channel(app): with app.app_context(): mocked_broker = MagicMock() with", "delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"],", "table_name) with mlwh_sql_engine.begin() as connection: print(\"Deleting MLWH test data\") connection.execute(table.delete()) @pytest.fixture def event_wh_data(app,", "however it wants yield copy.deepcopy(CENTRES) # clear up after the fixture is used", "get_table(mlwh_sql_engine, table_name) with mlwh_sql_engine.begin() as connection: print(\"Deleting MLWH test data\") connection.execute(table.delete()) @pytest.fixture def", "@pytest.fixture def cherrytrack_mock_run_info( app, mocked_responses, run_id, cherrytrack_run_info_response, cherrytrack_mock_run_info_status ): run_url = 
f\"{app.config['CHERRYTRACK_URL']}/automation-system-runs/{run_id}\" mocked_responses.add(", "{}, }, \"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture def message_source_all_negative(): message_content: EventMessage =", "inserts insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], )", "\"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_COMPLETED, \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [ { \"role_type\": \"sample\", \"subject_type\":", "source_barcode, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() example", "@pytest.fixture def message_source_all_negative(): message_content: EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_ALL_NEGATIVES,", "from lighthouse.types import EventMessage from tests.fixtures.data.biosero.destination_plate_wells import build_cherrytrack_destination_plate_response from tests.fixtures.data.biosero.source_plate_wells import build_cherrytrack_source_plates_response from", "dart_schema_create): with app.app_context(): load_sql_server_script(\"tests/data/dart/seed.sql\") @pytest.fixture def dart_mongo_merged_samples(): return DART_MONGO_MERGED_SAMPLES @pytest.fixture def event_wh_sql_engine(app): return", "clear up after the fixture is used with app.app_context(): events_collection.delete_many({}) @pytest.fixture def mocked_responses():", "\"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_COMPLETED, \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", 
\"subjects\": [ { \"role_type\": \"sample\",", "mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def mlwh_sentinel_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine,", "copy import os from http import HTTPStatus from unittest.mock import MagicMock, patch import", "app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"study\"], mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"], ) insert_into_mlwh( app,", "@pytest.fixture def cherrytrack_mock_run_info_status(): return HTTPStatus.OK @pytest.fixture def cherrytrack_mock_destination_plate_status(): return HTTPStatus.OK @pytest.fixture def baracoda_mock_status():", "app.config[\"MLWH_STOCK_RESOURCES_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() #", "mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() example = cherrytrack_mlwh_example(source_barcode) # inserts insert_into_mlwh(", "\"metadata\": {}, }, \"lims\": \"LH_TEST\", } return Message(message_content) @pytest.fixture def message_source_complete(): message_content: EventMessage", "delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() example = cherrytrack_mlwh_example(source_barcode) #", "import SAMPLES, rows_for_samples_in_cherrytrack from tests.fixtures.data.source_plates import SOURCE_PLATES @pytest.fixture def app(): # set the", "@pytest.fixture def 
message_source_complete(): message_content: EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_COMPLETED,", "json=cherrytrack_source_plates_response, status=cherrytrack_mock_source_plates_status, ) yield @pytest.fixture def cherrytrack_mock_destination_plate( app, mocked_responses, destination_barcode, cherrytrack_destination_plate_response, cherrytrack_mock_destination_plate_status, ):", "unittest.mock import MagicMock, patch import pytest import responses from lighthouse import create_app from", "app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"] + MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], # type: ignore mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"study\"],", "samples = samples # create a copy so that the test can change", "connection.execute(subject_types_table.insert(), EVENT_WH_DATA[\"subject_types\"]) connection.execute(subjects_table.insert(), EVENT_WH_DATA[\"subjects\"]) connection.execute(events_table.insert(), EVENT_WH_DATA[\"events\"]) connection.execute(roles_table.insert(), EVENT_WH_DATA[\"roles\"]) yield finally: delete_event_warehouse_data() @pytest.fixture def", "return COG_UK_IDS # ********************** WAREHOUSE DATA ************************** # @pytest.fixture def mlwh_lh_samples(app, mlwh_sql_engine): insert_into_mlwh(app,", "app.app_context(): centres_collection = app.data.driver.db.centres _ = centres_collection.insert_many(CENTRES) # yield a copy so that", "{ \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\": PE_BECKMAN_SOURCE_ALL_NEGATIVES, \"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [", "mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() # inserts insert_into_mlwh(", 
"mlwh_sql_engine.begin() as connection: connection.execute(table.delete()) # delete all rows from table first print(\"Inserting MLWH", "DATA ************************** # @pytest.fixture def mlwh_lh_samples(app, mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def", "copy.deepcopy(SOURCE_PLATES) # clear up after the fixture is used with app.app_context(): source_plates_collection.delete_many({}) @pytest.fixture", "SAMPLES, rows_for_samples_in_cherrytrack from tests.fixtures.data.source_plates import SOURCE_PLATES @pytest.fixture def app(): # set the 'EVE_SETTINGS'", "up after the fixture is used with app.app_context(): priority_samples_collection.delete_many({}) @pytest.fixture def source_plates(app): with", "app.config[\"MLWH_STUDY_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"stock_resource\"], mlwh_sql_engine, app.config[\"MLWH_STOCK_RESOURCES_TABLE\"], ) yield finally: delete_data() @pytest.fixture def", "from lighthouse.messages.message import Message from lighthouse.types import EventMessage from tests.fixtures.data.biosero.destination_plate_wells import build_cherrytrack_destination_plate_response from", "return cherrytrack_destination_plate_response @pytest.fixture def cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode): return build_cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode) @pytest.fixture def", "FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME: \"CPA\", } } @pytest.fixture def cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id): return build_cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id)", "baracoda_mock_responses[centre_prefix] is not None: num_samples = len(baracoda_mock_responses[centre_prefix][\"barcodes_group\"][\"barcodes\"]) baracoda_url = ( f\"http://{app.config['BARACODA_URL']}\" 
f\"/barcodes_group/{centre_prefix}/new?count={num_samples}\" )", "lighthouse.helpers.mysql import create_mysql_connection_engine, get_table from lighthouse.messages.message import Message from lighthouse.types import EventMessage from", "to easily switch to the testing environment when creating an app os.environ[\"EVE_SETTINGS\"] =", "can change it however it needs priority_samples = copy.deepcopy(PRIORITY_SAMPLES) # update the priority", "Message from lighthouse.types import EventMessage from tests.fixtures.data.biosero.destination_plate_wells import build_cherrytrack_destination_plate_response from tests.fixtures.data.biosero.source_plate_wells import build_cherrytrack_source_plates_response", "\"CPA\", } } @pytest.fixture def cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id): return build_cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id) def", "import EVENT_WH_DATA from tests.fixtures.data.mlwh import ( COG_UK_IDS, MLWH_LH_SAMPLES, MLWH_LH_SAMPLES_MULTIPLE, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE, MLWH_SAMPLE_STOCK_RESOURCE, SAMPLES_FOR_MLWH_UPDATE, cherrytrack_mlwh_example,", "get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENT_TYPES_TABLE\"]) subject_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECT_TYPES_TABLE\"]) role_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_ROLE_TYPES_TABLE\"]) def delete_event_warehouse_data(): with", "priority_samples # clear up after the fixture is used with app.app_context(): priority_samples_collection.delete_many({}) @pytest.fixture", "def centres(app): with app.app_context(): centres_collection = app.data.driver.db.centres _ = centres_collection.insert_many(CENTRES) # yield a", "= app.data.driver.db.events inserted_events = events_collection.insert_many(PLATE_EVENTS) # yield a copy of so that the", "so that the test change it however it wants yield copy.deepcopy(CENTRES) # clear", 
"priority_samples_collection.insert_many(priority_samples) yield priority_samples # clear up after the fixture is used with app.app_context():", "switch to the testing environment when creating an app os.environ[\"EVE_SETTINGS\"] = \"test.py\" app", "the priority samples with the _id of the samples inserted into mongo, currently", "COG_UK_IDS # ********************** WAREHOUSE DATA ************************** # @pytest.fixture def mlwh_lh_samples(app, mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES,", "mlwh_beckman_cherrypicked(app, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() #", "mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() # inserts", "inserts insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"] + MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"sample\"], #", "= samples_collection.insert_many(SAMPLES) # yield a copy of so that the test change it", "yield priority_samples # clear up after the fixture is used with app.app_context(): priority_samples_collection.delete_many({})", "app.data.driver.db.source_plates _ = source_plates_collection.insert_many(SOURCE_PLATES) # yield a copy of that the test change", "that the test change it however it wants yield copy.deepcopy(PLATE_EVENTS), inserted_events # clear", "{ \"barcode\": \"plate_123\", \"location_barcode\": \"location_123\", } ] mocked_responses.add(responses.POST, labwhere_url, json=body, status=HTTPStatus.OK) @pytest.fixture def", "\"occured_at\": \"2020-11-26T15:58:20\", \"user_identifier\": \"test1\", \"subjects\": [], \"metadata\": 
{}, }, \"lims\": \"LH_TEST\", } return", "however it needs priority_samples = copy.deepcopy(PRIORITY_SAMPLES) # update the priority samples with the", "as connection: print(\"Deleting MLWH test data\") connection.execute(table.delete()) @pytest.fixture def event_wh_data(app, event_wh_sql_engine): try: subjects_table", "needs priority_samples = copy.deepcopy(PRIORITY_SAMPLES) # update the priority samples with the _id of", "when creating an app os.environ[\"EVE_SETTINGS\"] = \"test.py\" app = create_app() yield app @pytest.fixture", "insert_into_mlwh(app, data, mlwh_sql_engine, table_name): table = get_table(mlwh_sql_engine, table_name) with mlwh_sql_engine.begin() as connection: connection.execute(table.delete())", "finally: samples_collection.delete_many({}) @pytest.fixture def mlwh_samples_in_cherrytrack(app, source_barcode, mlwh_sql_engine): def delete_data(): delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app,", "get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENTS_TABLE\"]) event_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_EVENT_TYPES_TABLE\"]) subject_types_table = get_table(event_wh_sql_engine, app.config[\"EVENT_WH_SUBJECT_TYPES_TABLE\"]) role_types_table = get_table(event_wh_sql_engine,", "events_collection.delete_many({}) @pytest.fixture def priority_samples(app, samples): _, samples = samples # create a copy", "table_name) with mlwh_sql_engine.begin() as connection: connection.execute(table.delete()) # delete all rows from table first", "app.data.driver.db.centres _ = centres_collection.insert_many(CENTRES) # yield a copy so that the test change", "return PLATES_LOOKUP_WITH_SAMPLES @pytest.fixture def plates_lookup_without_samples(samples, priority_samples): return PLATES_LOOKUP_WITHOUT_SAMPLES @pytest.fixture def mocked_rabbit_channel(app): with app.app_context():", "set the 'EVE_SETTINGS' env variable to easily switch to the testing environment when", "= 
centres_collection.insert_many(CENTRES) # yield a copy so that the test change it however", "test change it however it wants yield copy.deepcopy(CENTRES) # clear up after the", "************************** # @pytest.fixture def mlwh_lh_samples(app, mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def mlwh_lh_samples_multiple(app,", "copy of so that the test change it however it wants yield copy.deepcopy(PLATE_EVENTS),", "\"barcode\": \"plate_123\", \"location_barcode\": \"location_123\", } ] mocked_responses.add(responses.POST, labwhere_url, json=body, status=HTTPStatus.OK) @pytest.fixture def samples_for_mlwh_update():", "# @pytest.fixture def mlwh_lh_samples(app, mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def mlwh_lh_samples_multiple(app, mlwh_sql_engine):", "dart_samples(app, dart_schema_create): with app.app_context(): load_sql_server_script(\"tests/data/dart/seed.sql\") @pytest.fixture def dart_mongo_merged_samples(): return DART_MONGO_MERGED_SAMPLES @pytest.fixture def event_wh_sql_engine(app):", "inserted_events = events_collection.insert_many(PLATE_EVENTS) # yield a copy of so that the test change", "Message(message_content) @pytest.fixture def message_source_complete(): message_content: EventMessage = { \"event\": { \"uuid\": \"1770dbcd-0abf-4293-ac62-dd26964f80b0\", \"event_type\":", "): run_url = f\"{app.config['CHERRYTRACK_URL']}/automation-system-runs/{run_id}\" mocked_responses.add( responses.GET, run_url, json=cherrytrack_run_info_response, status=cherrytrack_mock_run_info_status, ) yield @pytest.fixture def", "app.app_context(): samples_collection = app.data.driver.db.samples inserted_samples = samples_collection.insert_many(samples) # yield a copy of so", "def mocked_responses(): \"\"\"Easily mock responses from HTTP calls. 
https://github.com/getsentry/responses#responses-as-a-pytest-fixture\"\"\" with responses.RequestsMock() as rsps:", "the fixture is used with app.app_context(): samples_collection.delete_many({}) @pytest.fixture def clear_events(app): try: yield finally:", "samples_collection = app.data.driver.db.samples inserted_samples = samples_collection.insert_many(samples) # yield a copy of so that", "= samples # create a copy so that the test can change it", "_id of the samples inserted into mongo, currently only uses the number #", "{\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"biosero_read_write\")} @pytest.fixture def lighthouse_ui_auth_headers(app): with app.app_context(): return {\"Authorization\": app.config.get(\"API_TOKENS_EVENTS\").get(\"lighthouse_ui_read_write\")} @pytest.fixture def centres(app):", "\"plate_123\", \"location_barcode\": \"location_123\", } ] mocked_responses.add(responses.POST, labwhere_url, json=body, status=HTTPStatus.OK) @pytest.fixture def samples_for_mlwh_update(): return", "test change it however it wants yield copy.deepcopy(SAMPLES), inserted_samples # clear up after", "@pytest.fixture def centres(app): with app.app_context(): centres_collection = app.data.driver.db.centres _ = centres_collection.insert_many(CENTRES) # yield", "mlwh_lh_samples(app, mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) @pytest.fixture def mlwh_lh_samples_multiple(app, mlwh_sql_engine): insert_into_mlwh(app, MLWH_LH_SAMPLES_MULTIPLE, mlwh_sql_engine,", "mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"]) delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine,", "priority_sample in enumerate(priority_samples): priority_sample[FIELD_SAMPLE_ID] = samples.inserted_ids[count] with app.app_context(): priority_samples_collection = 
app.data.driver.db.priority_samples _ =", "in enumerate(priority_samples): priority_sample[FIELD_SAMPLE_ID] = samples.inserted_ids[count] with app.app_context(): priority_samples_collection = app.data.driver.db.priority_samples _ = priority_samples_collection.insert_many(priority_samples)", "= \"test.py\" app = create_app() yield app @pytest.fixture def client(app): return app.test_client() @pytest.fixture", "from tests.fixtures.data.priority_samples import PRIORITY_SAMPLES from tests.fixtures.data.samples import SAMPLES, rows_for_samples_in_cherrytrack from tests.fixtures.data.source_plates import SOURCE_PLATES", "priority samples with the _id of the samples inserted into mongo, currently only", "so that the test change it however it wants yield copy.deepcopy(PLATE_EVENTS), inserted_events #", "@pytest.fixture def mocked_responses(): \"\"\"Easily mock responses from HTTP calls. https://github.com/getsentry/responses#responses-as-a-pytest-fixture\"\"\" with responses.RequestsMock() as", "delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_STUDY_TABLE\"]) try: delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"], mlwh_sql_engine, app.config[\"MLWH_SAMPLE_TABLE\"], )", "[ { \"barcode\": \"plate_123\", \"location_barcode\": \"location_123\", } ] mocked_responses.add(responses.POST, labwhere_url, json=body, status=HTTPStatus.OK) @pytest.fixture", "MLWH test data\") connection.execute(table.insert(), data) def delete_from_mlwh(app, mlwh_sql_engine, table_name): table = get_table(mlwh_sql_engine, table_name)", "\"data\": { \"id\": run_id, FIELD_CHERRYTRACK_USER_ID: \"user1\", FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER: \"aLiquidHandlerSerialNumber\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER: \"biosero\", FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME: \"CPA\", }", "wants yield copy.deepcopy(samples), inserted_samples # clear up after the fixture is used finally:", "\"no_callbacks\", \"occured_at\": \"2020-11-26T15:58:20\", 
\"user_identifier\": \"test1\", \"subjects\": [], \"metadata\": {}, }, \"lims\": \"LH_TEST\", }", "\"friendly_name\": \"friendly_name\", \"uuid\": \"00000000-1111-2222-3333-555555555555\", }, { \"role_type\": \"cherrypicking_source_labware\", \"subject_type\": \"plate\", \"friendly_name\": \"plate-barcode\", \"uuid\":", "= f\"{app.config['CHERRYTRACK_URL']}/automation-system-runs/{run_id}\" mocked_responses.add( responses.GET, run_url, json=cherrytrack_run_info_response, status=cherrytrack_mock_run_info_status, ) yield @pytest.fixture def baracoda_mock_barcodes_group(app, mocked_responses,", "copy of so that the test change it however it wants yield copy.deepcopy(SAMPLES),", "of priority samples therefore PRIORITY_SAMPLES needs to be <= SAMPLES for count, priority_sample", "tests.fixtures.data.event_wh import EVENT_WH_DATA from tests.fixtures.data.mlwh import ( COG_UK_IDS, MLWH_LH_SAMPLES, MLWH_LH_SAMPLES_MULTIPLE, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE, MLWH_SAMPLE_STOCK_RESOURCE, SAMPLES_FOR_MLWH_UPDATE,", "return PLATES_LOOKUP_WITHOUT_SAMPLES @pytest.fixture def mocked_rabbit_channel(app): with app.app_context(): mocked_broker = MagicMock() with patch(\"lighthouse.classes.services.warehouse.Broker\", return_value=mocked_broker):", "try: delete_data() # inserts insert_into_mlwh( app, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE[\"lighthouse_sample\"], mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"], ) insert_into_mlwh( app, MLWH_SAMPLE_STOCK_RESOURCE[\"sample\"]", "delete_event_warehouse_data(): with event_wh_sql_engine.begin() as connection: connection.execute(roles_table.delete()) connection.execute(subjects_table.delete()) connection.execute(events_table.delete()) connection.execute(event_types_table.delete()) connection.execute(subject_types_table.delete()) connection.execute(role_types_table.delete()) delete_event_warehouse_data() with", "********************** WAREHOUSE DATA ************************** # @pytest.fixture def mlwh_lh_samples(app, mlwh_sql_engine): 
insert_into_mlwh(app, MLWH_LH_SAMPLES, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"])", "app.app_context(): source_plates_collection.delete_many({}) @pytest.fixture def plate_events(app): with app.app_context(): events_collection = app.data.driver.db.events inserted_events = events_collection.insert_many(PLATE_EVENTS)", "EVENT_WH_DATA[\"subjects\"]) connection.execute(events_table.insert(), EVENT_WH_DATA[\"events\"]) connection.execute(roles_table.insert(), EVENT_WH_DATA[\"roles\"]) yield finally: delete_event_warehouse_data() @pytest.fixture def mlwh_sql_engine(app): return create_mysql_connection_engine(app.config[\"WAREHOUSES_RW_CONN_STRING\"],", "PLATES_LOOKUP_WITHOUT_SAMPLES @pytest.fixture def mocked_rabbit_channel(app): with app.app_context(): mocked_broker = MagicMock() with patch(\"lighthouse.classes.services.warehouse.Broker\", return_value=mocked_broker): mocked_channel", "lighthouse import create_app from lighthouse.constants.events import PE_BECKMAN_SOURCE_ALL_NEGATIVES, PE_BECKMAN_SOURCE_COMPLETED from lighthouse.constants.fields import ( FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER,", "it however it wants yield copy.deepcopy(samples), inserted_samples # clear up after the fixture", "# yield a copy of so that the test change it however it", "responses from HTTP calls. 
https://github.com/getsentry/responses#responses-as-a-pytest-fixture\"\"\" with responses.RequestsMock() as rsps: yield rsps @pytest.fixture def", "EVENT_WH_DATA[\"role_types\"]) connection.execute(event_types_table.insert(), EVENT_WH_DATA[\"event_types\"]) connection.execute(subject_types_table.insert(), EVENT_WH_DATA[\"subject_types\"]) connection.execute(subjects_table.insert(), EVENT_WH_DATA[\"subjects\"]) connection.execute(events_table.insert(), EVENT_WH_DATA[\"events\"]) connection.execute(roles_table.insert(), EVENT_WH_DATA[\"roles\"]) yield finally:", "delete_from_mlwh(app, mlwh_sql_engine, app.config[\"MLWH_LIGHTHOUSE_SAMPLE_TABLE\"]) try: delete_data() example = cherrytrack_mlwh_example(source_barcode) # inserts insert_into_mlwh( app, example[\"lighthouse_sample\"],", "build_cherrytrack_source_plates_response from tests.fixtures.data.centres import CENTRES from tests.fixtures.data.dart import DART_MONGO_MERGED_SAMPLES from tests.fixtures.data.event_wh import EVENT_WH_DATA", "if baracoda_mock_responses[centre_prefix] is not None: num_samples = len(baracoda_mock_responses[centre_prefix][\"barcodes_group\"][\"barcodes\"]) baracoda_url = ( f\"http://{app.config['BARACODA_URL']}\" f\"/barcodes_group/{centre_prefix}/new?count={num_samples}\"", "import build_cherrytrack_destination_plate_response from tests.fixtures.data.biosero.source_plate_wells import build_cherrytrack_source_plates_response from tests.fixtures.data.centres import CENTRES from tests.fixtures.data.dart import" ]
[ "rest_auth.views import ( LoginView, LogoutView, UserDetailsView ) from rest_framework import routers from FoxhoundApp.TrafficApp.views", "we include login URLs for the browsable API. urlpatterns = [ url(r'^auth/', include((rest_auth_urls,", ") from rest_framework import routers from FoxhoundApp.TrafficApp.views import ItemsView, HeatMapView rest_auth_urls = [", "import ItemsView, HeatMapView rest_auth_urls = [ url(r'^login/$', LoginView.as_view(), name='rest_login'), url(r'^logout/$', LogoutView.as_view(), name='rest_logout'), url(r'^user/$',", "name='user'), ] # Wire up our API using automatic URL routing. # Additionally,", "LoginView, LogoutView, UserDetailsView ) from rest_framework import routers from FoxhoundApp.TrafficApp.views import ItemsView, HeatMapView", "rest_auth_urls = [ url(r'^login/$', LoginView.as_view(), name='rest_login'), url(r'^logout/$', LogoutView.as_view(), name='rest_logout'), url(r'^user/$', UserDetailsView.as_view(), name='user'), ]", "url, include from rest_auth.views import ( LoginView, LogoutView, UserDetailsView ) from rest_framework import", "the browsable API. urlpatterns = [ url(r'^auth/', include((rest_auth_urls, 'auth'), namespace='auth')), url(r'^items/', ItemsView.as_view(), name='Items'),", "UserDetailsView.as_view(), name='user'), ] # Wire up our API using automatic URL routing. #", "= [ url(r'^auth/', include((rest_auth_urls, 'auth'), namespace='auth')), url(r'^items/', ItemsView.as_view(), name='Items'), url(r'^heatmap/', HeatMapView.as_view(), name='HeatMap'), ]", "LogoutView, UserDetailsView ) from rest_framework import routers from FoxhoundApp.TrafficApp.views import ItemsView, HeatMapView rest_auth_urls", "django.conf.urls import url, include from rest_auth.views import ( LoginView, LogoutView, UserDetailsView ) from", "URLs for the browsable API. 
urlpatterns = [ url(r'^auth/', include((rest_auth_urls, 'auth'), namespace='auth')), url(r'^items/',", "rest_framework import routers from FoxhoundApp.TrafficApp.views import ItemsView, HeatMapView rest_auth_urls = [ url(r'^login/$', LoginView.as_view(),", "urlpatterns = [ url(r'^auth/', include((rest_auth_urls, 'auth'), namespace='auth')), url(r'^items/', ItemsView.as_view(), name='Items'), url(r'^heatmap/', HeatMapView.as_view(), name='HeatMap'),", "FoxhoundApp.TrafficApp.views import ItemsView, HeatMapView rest_auth_urls = [ url(r'^login/$', LoginView.as_view(), name='rest_login'), url(r'^logout/$', LogoutView.as_view(), name='rest_logout'),", "# Wire up our API using automatic URL routing. # Additionally, we include", "( LoginView, LogoutView, UserDetailsView ) from rest_framework import routers from FoxhoundApp.TrafficApp.views import ItemsView,", "url(r'^logout/$', LogoutView.as_view(), name='rest_logout'), url(r'^user/$', UserDetailsView.as_view(), name='user'), ] # Wire up our API using", "UserDetailsView ) from rest_framework import routers from FoxhoundApp.TrafficApp.views import ItemsView, HeatMapView rest_auth_urls =", "url(r'^user/$', UserDetailsView.as_view(), name='user'), ] # Wire up our API using automatic URL routing.", "using automatic URL routing. # Additionally, we include login URLs for the browsable", "= [ url(r'^login/$', LoginView.as_view(), name='rest_login'), url(r'^logout/$', LogoutView.as_view(), name='rest_logout'), url(r'^user/$', UserDetailsView.as_view(), name='user'), ] #", "name='rest_login'), url(r'^logout/$', LogoutView.as_view(), name='rest_logout'), url(r'^user/$', UserDetailsView.as_view(), name='user'), ] # Wire up our API", "automatic URL routing. 
# Additionally, we include login URLs for the browsable API.", "LoginView.as_view(), name='rest_login'), url(r'^logout/$', LogoutView.as_view(), name='rest_logout'), url(r'^user/$', UserDetailsView.as_view(), name='user'), ] # Wire up our", "Wire up our API using automatic URL routing. # Additionally, we include login", "browsable API. urlpatterns = [ url(r'^auth/', include((rest_auth_urls, 'auth'), namespace='auth')), url(r'^items/', ItemsView.as_view(), name='Items'), url(r'^heatmap/',", "Additionally, we include login URLs for the browsable API. urlpatterns = [ url(r'^auth/',", "HeatMapView rest_auth_urls = [ url(r'^login/$', LoginView.as_view(), name='rest_login'), url(r'^logout/$', LogoutView.as_view(), name='rest_logout'), url(r'^user/$', UserDetailsView.as_view(), name='user'),", "# Additionally, we include login URLs for the browsable API. urlpatterns = [", "] # Wire up our API using automatic URL routing. # Additionally, we", "<reponame>Anton250/MoscowCityHack2021_FoxoundTeam from django.conf.urls import url, include from rest_auth.views import ( LoginView, LogoutView, UserDetailsView", "up our API using automatic URL routing. # Additionally, we include login URLs", "import ( LoginView, LogoutView, UserDetailsView ) from rest_framework import routers from FoxhoundApp.TrafficApp.views import", "LogoutView.as_view(), name='rest_logout'), url(r'^user/$', UserDetailsView.as_view(), name='user'), ] # Wire up our API using automatic", "include from rest_auth.views import ( LoginView, LogoutView, UserDetailsView ) from rest_framework import routers", "from rest_auth.views import ( LoginView, LogoutView, UserDetailsView ) from rest_framework import routers from", "API using automatic URL routing. # Additionally, we include login URLs for the", "import routers from FoxhoundApp.TrafficApp.views import ItemsView, HeatMapView rest_auth_urls = [ url(r'^login/$', LoginView.as_view(), name='rest_login'),", "include login URLs for the browsable API. 
urlpatterns = [ url(r'^auth/', include((rest_auth_urls, 'auth'),", "login URLs for the browsable API. urlpatterns = [ url(r'^auth/', include((rest_auth_urls, 'auth'), namespace='auth')),", "name='rest_logout'), url(r'^user/$', UserDetailsView.as_view(), name='user'), ] # Wire up our API using automatic URL", "routing. # Additionally, we include login URLs for the browsable API. urlpatterns =", "ItemsView, HeatMapView rest_auth_urls = [ url(r'^login/$', LoginView.as_view(), name='rest_login'), url(r'^logout/$', LogoutView.as_view(), name='rest_logout'), url(r'^user/$', UserDetailsView.as_view(),", "URL routing. # Additionally, we include login URLs for the browsable API. urlpatterns", "from rest_framework import routers from FoxhoundApp.TrafficApp.views import ItemsView, HeatMapView rest_auth_urls = [ url(r'^login/$',", "API. urlpatterns = [ url(r'^auth/', include((rest_auth_urls, 'auth'), namespace='auth')), url(r'^items/', ItemsView.as_view(), name='Items'), url(r'^heatmap/', HeatMapView.as_view(),", "routers from FoxhoundApp.TrafficApp.views import ItemsView, HeatMapView rest_auth_urls = [ url(r'^login/$', LoginView.as_view(), name='rest_login'), url(r'^logout/$',", "[ url(r'^login/$', LoginView.as_view(), name='rest_login'), url(r'^logout/$', LogoutView.as_view(), name='rest_logout'), url(r'^user/$', UserDetailsView.as_view(), name='user'), ] # Wire", "our API using automatic URL routing. 
# Additionally, we include login URLs for", "from django.conf.urls import url, include from rest_auth.views import ( LoginView, LogoutView, UserDetailsView )", "url(r'^login/$', LoginView.as_view(), name='rest_login'), url(r'^logout/$', LogoutView.as_view(), name='rest_logout'), url(r'^user/$', UserDetailsView.as_view(), name='user'), ] # Wire up", "from FoxhoundApp.TrafficApp.views import ItemsView, HeatMapView rest_auth_urls = [ url(r'^login/$', LoginView.as_view(), name='rest_login'), url(r'^logout/$', LogoutView.as_view(),", "import url, include from rest_auth.views import ( LoginView, LogoutView, UserDetailsView ) from rest_framework", "for the browsable API. urlpatterns = [ url(r'^auth/', include((rest_auth_urls, 'auth'), namespace='auth')), url(r'^items/', ItemsView.as_view()," ]
[ "<reponame>avdata99/nic # Generated by Django 3.1.2 on 2020-10-14 01:10 from django.db import migrations,", "# Generated by Django 3.1.2 on 2020-10-14 01:10 from django.db import migrations, models", "2020-10-14 01:10 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('cambios',", "models class Migration(migrations.Migration): dependencies = [ ('cambios', '0002_campocambio_uid_anterior'), ] operations = [ migrations.AlterField(", "[ ('cambios', '0002_campocambio_uid_anterior'), ] operations = [ migrations.AlterField( model_name='campocambio', name='campo', field=models.CharField(db_index=True, max_length=240, null=True),", "Migration(migrations.Migration): dependencies = [ ('cambios', '0002_campocambio_uid_anterior'), ] operations = [ migrations.AlterField( model_name='campocambio', name='campo',", "migrations, models class Migration(migrations.Migration): dependencies = [ ('cambios', '0002_campocambio_uid_anterior'), ] operations = [", "3.1.2 on 2020-10-14 01:10 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "class Migration(migrations.Migration): dependencies = [ ('cambios', '0002_campocambio_uid_anterior'), ] operations = [ migrations.AlterField( model_name='campocambio',", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('cambios', '0002_campocambio_uid_anterior'), ] operations", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('cambios', '0002_campocambio_uid_anterior'), ]", "by Django 3.1.2 on 2020-10-14 01:10 from django.db import migrations, models class Migration(migrations.Migration):", "01:10 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('cambios', '0002_campocambio_uid_anterior'),", "'0002_campocambio_uid_anterior'), ] operations = [ migrations.AlterField( model_name='campocambio', name='campo', 
field=models.CharField(db_index=True, max_length=240, null=True), ), ]", "= [ ('cambios', '0002_campocambio_uid_anterior'), ] operations = [ migrations.AlterField( model_name='campocambio', name='campo', field=models.CharField(db_index=True, max_length=240,", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('cambios', '0002_campocambio_uid_anterior'), ] operations =", "on 2020-10-14 01:10 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "('cambios', '0002_campocambio_uid_anterior'), ] operations = [ migrations.AlterField( model_name='campocambio', name='campo', field=models.CharField(db_index=True, max_length=240, null=True), ),", "dependencies = [ ('cambios', '0002_campocambio_uid_anterior'), ] operations = [ migrations.AlterField( model_name='campocambio', name='campo', field=models.CharField(db_index=True,", "Generated by Django 3.1.2 on 2020-10-14 01:10 from django.db import migrations, models class", "Django 3.1.2 on 2020-10-14 01:10 from django.db import migrations, models class Migration(migrations.Migration): dependencies" ]
[ "counter.counter == max_retries + 1 def test_throttled_boto_resource(): max_retries = 1 class BogusResource: def", "def bogus_function(counter): counter.count() if counter.counter <= max_retries: raise BogusException counter = Counter() bogus_function(counter)", "assert counter.counter == max_retries + 1 def test_throttled_boto_resource(): max_retries = 1 class BogusResource:", "def __init__(self): self.counter = 0 def count(self): self.counter += 1 def test_throttle_exception(): class", "1 retry_on_bogus = throttle_exception([BogusException], max_sleep=0.1, max_retries=max_retries) @retry_on_bogus def bogus_function(counter): counter.count() if counter.counter <=", "{ 'Code': 'ThrottlingException' } }, operation_name='bogus' ) counter = Counter() bogus_resource = ThrottledBotoResource(BogusResource(counter,", "1 class BogusResource: def __init__(self, counter, max_retries): self.counter = counter self._max_retries = max_retries", "class BogusResource: def __init__(self, counter, max_retries): self.counter = counter self._max_retries = max_retries def", "+= 1 def test_throttle_exception(): class BogusException(Exception): pass max_retries = 1 retry_on_bogus = throttle_exception([BogusException],", "BogusResource: def __init__(self, counter, max_retries): self.counter = counter self._max_retries = max_retries def bogus_function(self):", "BogusException(Exception): pass max_retries = 1 retry_on_bogus = throttle_exception([BogusException], max_sleep=0.1, max_retries=max_retries) @retry_on_bogus def bogus_function(counter):", "def __init__(self, counter, max_retries): self.counter = counter self._max_retries = max_retries def bogus_function(self): self.counter.count()", "self.counter = counter self._max_retries = max_retries def bogus_function(self): self.counter.count() if self.counter.counter <= self._max_retries:", "counter.count() if counter.counter <= max_retries: raise BogusException counter = Counter() bogus_function(counter) assert counter.counter", "= 0 def 
count(self): self.counter += 1 def test_throttle_exception(): class BogusException(Exception): pass max_retries", "if counter.counter <= max_retries: raise BogusException counter = Counter() bogus_function(counter) assert counter.counter ==", "bogus_function(counter) assert counter.counter == max_retries + 1 def test_throttled_boto_resource(): max_retries = 1 class", "counter.counter <= max_retries: raise BogusException counter = Counter() bogus_function(counter) assert counter.counter == max_retries", "throttle_exception([BogusException], max_sleep=0.1, max_retries=max_retries) @retry_on_bogus def bogus_function(counter): counter.count() if counter.counter <= max_retries: raise BogusException", "self.counter.counter <= self._max_retries: raise ClientError( error_response={ 'Error': { 'Code': 'ThrottlingException' } }, operation_name='bogus'", "osdu_commons.utils.throttle import throttle_exception, ThrottledBotoResource class Counter: def __init__(self): self.counter = 0 def count(self):", "= 1 retry_on_bogus = throttle_exception([BogusException], max_sleep=0.1, max_retries=max_retries) @retry_on_bogus def bogus_function(counter): counter.count() if counter.counter", "counter = Counter() bogus_resource = ThrottledBotoResource(BogusResource(counter, max_retries)) bogus_resource.bogus_function() assert counter.counter == max_retries +", "counter self._max_retries = max_retries def bogus_function(self): self.counter.count() if self.counter.counter <= self._max_retries: raise ClientError(", "def bogus_function(self): self.counter.count() if self.counter.counter <= self._max_retries: raise ClientError( error_response={ 'Error': { 'Code':", "error_response={ 'Error': { 'Code': 'ThrottlingException' } }, operation_name='bogus' ) counter = Counter() bogus_resource", "pass max_retries = 1 retry_on_bogus = throttle_exception([BogusException], max_sleep=0.1, max_retries=max_retries) @retry_on_bogus def bogus_function(counter): counter.count()", "max_retries + 1 def 
test_throttled_boto_resource(): max_retries = 1 class BogusResource: def __init__(self, counter,", "+ 1 def test_throttled_boto_resource(): max_retries = 1 class BogusResource: def __init__(self, counter, max_retries):", "operation_name='bogus' ) counter = Counter() bogus_resource = ThrottledBotoResource(BogusResource(counter, max_retries)) bogus_resource.bogus_function() assert counter.counter ==", "if self.counter.counter <= self._max_retries: raise ClientError( error_response={ 'Error': { 'Code': 'ThrottlingException' } },", "self.counter = 0 def count(self): self.counter += 1 def test_throttle_exception(): class BogusException(Exception): pass", "ThrottledBotoResource class Counter: def __init__(self): self.counter = 0 def count(self): self.counter += 1", "def test_throttled_boto_resource(): max_retries = 1 class BogusResource: def __init__(self, counter, max_retries): self.counter =", "max_sleep=0.1, max_retries=max_retries) @retry_on_bogus def bogus_function(counter): counter.count() if counter.counter <= max_retries: raise BogusException counter", "self._max_retries: raise ClientError( error_response={ 'Error': { 'Code': 'ThrottlingException' } }, operation_name='bogus' ) counter", "import ClientError from osdu_commons.utils.throttle import throttle_exception, ThrottledBotoResource class Counter: def __init__(self): self.counter =", "raise BogusException counter = Counter() bogus_function(counter) assert counter.counter == max_retries + 1 def", "class Counter: def __init__(self): self.counter = 0 def count(self): self.counter += 1 def", "self._max_retries = max_retries def bogus_function(self): self.counter.count() if self.counter.counter <= self._max_retries: raise ClientError( error_response={", "max_retries=max_retries) @retry_on_bogus def bogus_function(counter): counter.count() if counter.counter <= max_retries: raise BogusException counter =", "counter = Counter() bogus_function(counter) assert counter.counter == max_retries + 1 def 
test_throttled_boto_resource(): max_retries", "= 1 class BogusResource: def __init__(self, counter, max_retries): self.counter = counter self._max_retries =", "@retry_on_bogus def bogus_function(counter): counter.count() if counter.counter <= max_retries: raise BogusException counter = Counter()", "'Code': 'ThrottlingException' } }, operation_name='bogus' ) counter = Counter() bogus_resource = ThrottledBotoResource(BogusResource(counter, max_retries))", "class BogusException(Exception): pass max_retries = 1 retry_on_bogus = throttle_exception([BogusException], max_sleep=0.1, max_retries=max_retries) @retry_on_bogus def", "<= max_retries: raise BogusException counter = Counter() bogus_function(counter) assert counter.counter == max_retries +", "from botocore.exceptions import ClientError from osdu_commons.utils.throttle import throttle_exception, ThrottledBotoResource class Counter: def __init__(self):", "max_retries): self.counter = counter self._max_retries = max_retries def bogus_function(self): self.counter.count() if self.counter.counter <=", "__init__(self, counter, max_retries): self.counter = counter self._max_retries = max_retries def bogus_function(self): self.counter.count() if", "ClientError from osdu_commons.utils.throttle import throttle_exception, ThrottledBotoResource class Counter: def __init__(self): self.counter = 0", "bogus_function(counter): counter.count() if counter.counter <= max_retries: raise BogusException counter = Counter() bogus_function(counter) assert", "max_retries = 1 retry_on_bogus = throttle_exception([BogusException], max_sleep=0.1, max_retries=max_retries) @retry_on_bogus def bogus_function(counter): counter.count() if", "raise ClientError( error_response={ 'Error': { 'Code': 'ThrottlingException' } }, operation_name='bogus' ) counter =", "0 def count(self): self.counter += 1 def test_throttle_exception(): class BogusException(Exception): pass max_retries =", "Counter: def __init__(self): self.counter = 0 def count(self): 
self.counter += 1 def test_throttle_exception():", "max_retries = 1 class BogusResource: def __init__(self, counter, max_retries): self.counter = counter self._max_retries", "def count(self): self.counter += 1 def test_throttle_exception(): class BogusException(Exception): pass max_retries = 1", "= throttle_exception([BogusException], max_sleep=0.1, max_retries=max_retries) @retry_on_bogus def bogus_function(counter): counter.count() if counter.counter <= max_retries: raise", "'ThrottlingException' } }, operation_name='bogus' ) counter = Counter() bogus_resource = ThrottledBotoResource(BogusResource(counter, max_retries)) bogus_resource.bogus_function()", "= Counter() bogus_resource = ThrottledBotoResource(BogusResource(counter, max_retries)) bogus_resource.bogus_function() assert counter.counter == max_retries + 1", "Counter() bogus_function(counter) assert counter.counter == max_retries + 1 def test_throttled_boto_resource(): max_retries = 1", "= max_retries def bogus_function(self): self.counter.count() if self.counter.counter <= self._max_retries: raise ClientError( error_response={ 'Error':", "count(self): self.counter += 1 def test_throttle_exception(): class BogusException(Exception): pass max_retries = 1 retry_on_bogus", "self.counter += 1 def test_throttle_exception(): class BogusException(Exception): pass max_retries = 1 retry_on_bogus =", "botocore.exceptions import ClientError from osdu_commons.utils.throttle import throttle_exception, ThrottledBotoResource class Counter: def __init__(self): self.counter", "bogus_function(self): self.counter.count() if self.counter.counter <= self._max_retries: raise ClientError( error_response={ 'Error': { 'Code': 'ThrottlingException'", "== max_retries + 1 def test_throttled_boto_resource(): max_retries = 1 class BogusResource: def __init__(self,", ") counter = Counter() bogus_resource = ThrottledBotoResource(BogusResource(counter, max_retries)) bogus_resource.bogus_function() assert counter.counter == max_retries", 
"<= self._max_retries: raise ClientError( error_response={ 'Error': { 'Code': 'ThrottlingException' } }, operation_name='bogus' )", "BogusException counter = Counter() bogus_function(counter) assert counter.counter == max_retries + 1 def test_throttled_boto_resource():", "test_throttled_boto_resource(): max_retries = 1 class BogusResource: def __init__(self, counter, max_retries): self.counter = counter", "}, operation_name='bogus' ) counter = Counter() bogus_resource = ThrottledBotoResource(BogusResource(counter, max_retries)) bogus_resource.bogus_function() assert counter.counter", "import throttle_exception, ThrottledBotoResource class Counter: def __init__(self): self.counter = 0 def count(self): self.counter", "= Counter() bogus_function(counter) assert counter.counter == max_retries + 1 def test_throttled_boto_resource(): max_retries =", "1 def test_throttled_boto_resource(): max_retries = 1 class BogusResource: def __init__(self, counter, max_retries): self.counter", "throttle_exception, ThrottledBotoResource class Counter: def __init__(self): self.counter = 0 def count(self): self.counter +=", "1 def test_throttle_exception(): class BogusException(Exception): pass max_retries = 1 retry_on_bogus = throttle_exception([BogusException], max_sleep=0.1,", "self.counter.count() if self.counter.counter <= self._max_retries: raise ClientError( error_response={ 'Error': { 'Code': 'ThrottlingException' }", "ClientError( error_response={ 'Error': { 'Code': 'ThrottlingException' } }, operation_name='bogus' ) counter = Counter()", "from osdu_commons.utils.throttle import throttle_exception, ThrottledBotoResource class Counter: def __init__(self): self.counter = 0 def", "def test_throttle_exception(): class BogusException(Exception): pass max_retries = 1 retry_on_bogus = throttle_exception([BogusException], max_sleep=0.1, max_retries=max_retries)", "max_retries def bogus_function(self): self.counter.count() if self.counter.counter <= self._max_retries: raise ClientError( 
error_response={ 'Error': {", "counter, max_retries): self.counter = counter self._max_retries = max_retries def bogus_function(self): self.counter.count() if self.counter.counter", "test_throttle_exception(): class BogusException(Exception): pass max_retries = 1 retry_on_bogus = throttle_exception([BogusException], max_sleep=0.1, max_retries=max_retries) @retry_on_bogus", "'Error': { 'Code': 'ThrottlingException' } }, operation_name='bogus' ) counter = Counter() bogus_resource =", "__init__(self): self.counter = 0 def count(self): self.counter += 1 def test_throttle_exception(): class BogusException(Exception):", "max_retries: raise BogusException counter = Counter() bogus_function(counter) assert counter.counter == max_retries + 1", "retry_on_bogus = throttle_exception([BogusException], max_sleep=0.1, max_retries=max_retries) @retry_on_bogus def bogus_function(counter): counter.count() if counter.counter <= max_retries:", "= counter self._max_retries = max_retries def bogus_function(self): self.counter.count() if self.counter.counter <= self._max_retries: raise", "} }, operation_name='bogus' ) counter = Counter() bogus_resource = ThrottledBotoResource(BogusResource(counter, max_retries)) bogus_resource.bogus_function() assert" ]
[ "optional parent :param parent: Parent object \"\"\" super().__init__(parent) # Get currencies from DB", "DB self.core.get_data_store().cursor.execute(\"SELECT symbol from currency_symbols\") results = self.core.get_data_store().cursor.fetchall() # Insert list into view", "\"\"\" super().__init__(parent) # Get currencies from DB self.core.get_data_store().cursor.execute(\"SELECT symbol from currency_symbols\") results =", "return QStandardItem() def index_for(self, symbol): \"\"\" Get the index for a provided symbol", "AppCore() items = {} def __init__(self, parent=None): \"\"\" Initialise with optional parent :param", "item.setEditable(False) self.items[symbol] = item self.appendRow(item) def item_for(self, symbol): \"\"\" Get the item for", "parent :param parent: Parent object \"\"\" super().__init__(parent) # Get currencies from DB self.core.get_data_store().cursor.execute(\"SELECT", "object \"\"\" super().__init__(parent) # Get currencies from DB self.core.get_data_store().cursor.execute(\"SELECT symbol from currency_symbols\") results", "index for a provided symbol :param symbol: Symbol :return: Index for symbol (invalid", "item = self.items[symbol] return item finally: return QStandardItem() def index_for(self, symbol): \"\"\" Get", "= self.items[symbol] return item finally: return QStandardItem() def index_for(self, symbol): \"\"\" Get the", "currency[0] item = QStandardItem() item.setText(symbol) item.setEditable(False) self.items[symbol] = item self.appendRow(item) def item_for(self, symbol):", "item_for(self, symbol): \"\"\" Get the item for a provided symbol :param symbol: Symbol", "= QStandardItem() item.setText(symbol) item.setEditable(False) self.items[symbol] = item self.appendRow(item) def item_for(self, symbol): \"\"\" Get", "AppCore class CurrencyListModel(QStandardItemModel): \"\"\" Model which fetches all known symbols from the DB", "__init__(self, parent=None): \"\"\" Initialise with optional parent :param parent: Parent object \"\"\" 
super().__init__(parent)", "\"\"\" Initialise with optional parent :param parent: Parent object \"\"\" super().__init__(parent) # Get", "not found) \"\"\" try: item = self.items[symbol] return item finally: return QStandardItem() def", "def __init__(self, parent=None): \"\"\" Initialise with optional parent :param parent: Parent object \"\"\"", "Get currencies from DB self.core.get_data_store().cursor.execute(\"SELECT symbol from currency_symbols\") results = self.core.get_data_store().cursor.fetchall() # Insert", ":return: Item for symbol (invalid if not found) \"\"\" try: item = self.items[symbol]", "Item for symbol (invalid if not found) \"\"\" try: item = self.items[symbol] return", "core import AppCore class CurrencyListModel(QStandardItemModel): \"\"\" Model which fetches all known symbols from", "= {} def __init__(self, parent=None): \"\"\" Initialise with optional parent :param parent: Parent", "for symbol (invalid if not found) \"\"\" try: item = self.items[symbol] return item", "fetches all known symbols from the DB and displays them in a list", "\"\"\" Get the item for a provided symbol :param symbol: Symbol :return: Item", "PyQt5.QtCore import QModelIndex from PyQt5.QtGui import QStandardItem, QStandardItemModel from core import AppCore class", "QStandardItem() item.setText(symbol) item.setEditable(False) self.items[symbol] = item self.appendRow(item) def item_for(self, symbol): \"\"\" Get the", "import AppCore class CurrencyListModel(QStandardItemModel): \"\"\" Model which fetches all known symbols from the", "= currency[0] item = QStandardItem() item.setText(symbol) item.setEditable(False) self.items[symbol] = item self.appendRow(item) def item_for(self,", "self.core.get_data_store().cursor.execute(\"SELECT symbol from currency_symbols\") results = self.core.get_data_store().cursor.fetchall() # Insert list into view for", "currency_symbols\") results = self.core.get_data_store().cursor.fetchall() # Insert list into view for currency in results:", 
"symbol: Symbol :return: Item for symbol (invalid if not found) \"\"\" try: item", "QModelIndex from PyQt5.QtGui import QStandardItem, QStandardItemModel from core import AppCore class CurrencyListModel(QStandardItemModel): \"\"\"", "displays them in a list \"\"\" core = AppCore() items = {} def", "the index for a provided symbol :param symbol: Symbol :return: Index for symbol", "Get the item for a provided symbol :param symbol: Symbol :return: Item for", "Symbol :return: Item for symbol (invalid if not found) \"\"\" try: item =", "item self.appendRow(item) def item_for(self, symbol): \"\"\" Get the item for a provided symbol", "item finally: return QStandardItem() def index_for(self, symbol): \"\"\" Get the index for a", "Symbol :return: Index for symbol (invalid if not found) \"\"\" try: item =", "Parent object \"\"\" super().__init__(parent) # Get currencies from DB self.core.get_data_store().cursor.execute(\"SELECT symbol from currency_symbols\")", "\"\"\" try: item = self.items[symbol] return item finally: return QStandardItem() def index_for(self, symbol):", "Insert list into view for currency in results: symbol = currency[0] item =", "for currency in results: symbol = currency[0] item = QStandardItem() item.setText(symbol) item.setEditable(False) self.items[symbol]", "= item self.appendRow(item) def item_for(self, symbol): \"\"\" Get the item for a provided", "the item for a provided symbol :param symbol: Symbol :return: Item for symbol", "\"\"\" Model which fetches all known symbols from the DB and displays them", "item.setText(symbol) item.setEditable(False) self.items[symbol] = item self.appendRow(item) def item_for(self, symbol): \"\"\" Get the item", "known symbols from the DB and displays them in a list \"\"\" core", "DB and displays them in a list \"\"\" core = AppCore() items =", "core = AppCore() items = {} def __init__(self, parent=None): \"\"\" Initialise with optional", "import QModelIndex from PyQt5.QtGui import QStandardItem, QStandardItemModel 
from core import AppCore class CurrencyListModel(QStandardItemModel):", "a list \"\"\" core = AppCore() items = {} def __init__(self, parent=None): \"\"\"", "QStandardItem, QStandardItemModel from core import AppCore class CurrencyListModel(QStandardItemModel): \"\"\" Model which fetches all", "results: symbol = currency[0] item = QStandardItem() item.setText(symbol) item.setEditable(False) self.items[symbol] = item self.appendRow(item)", "finally: return QStandardItem() def index_for(self, symbol): \"\"\" Get the index for a provided", "self.items[symbol] return item finally: return QStandardItem() def index_for(self, symbol): \"\"\" Get the index", "PyQt5.QtGui import QStandardItem, QStandardItemModel from core import AppCore class CurrencyListModel(QStandardItemModel): \"\"\" Model which", "view for currency in results: symbol = currency[0] item = QStandardItem() item.setText(symbol) item.setEditable(False)", "symbol :param symbol: Symbol :return: Index for symbol (invalid if not found) \"\"\"", "symbol :param symbol: Symbol :return: Item for symbol (invalid if not found) \"\"\"", "QStandardItem() def index_for(self, symbol): \"\"\" Get the index for a provided symbol :param", "self.items[symbol] = item self.appendRow(item) def item_for(self, symbol): \"\"\" Get the item for a", "CurrencyListModel(QStandardItemModel): \"\"\" Model which fetches all known symbols from the DB and displays", ":param symbol: Symbol :return: Item for symbol (invalid if not found) \"\"\" try:", "list into view for currency in results: symbol = currency[0] item = QStandardItem()", "try: item = self.items[symbol] return item finally: return QStandardItem() def index_for(self, symbol): \"\"\"", "for symbol (invalid if not found) \"\"\" try: item = self.items[symbol] return item.index()", "symbol (invalid if not found) \"\"\" try: item = self.items[symbol] return item finally:", "a provided symbol :param symbol: Symbol :return: Item for symbol (invalid if not", "Model which fetches all 
known symbols from the DB and displays them in", "(invalid if not found) \"\"\" try: item = self.items[symbol] return item.index() finally: return", "index_for(self, symbol): \"\"\" Get the index for a provided symbol :param symbol: Symbol", "Get the index for a provided symbol :param symbol: Symbol :return: Index for", "from the DB and displays them in a list \"\"\" core = AppCore()", "from PyQt5.QtGui import QStandardItem, QStandardItemModel from core import AppCore class CurrencyListModel(QStandardItemModel): \"\"\" Model", "self.appendRow(item) def item_for(self, symbol): \"\"\" Get the item for a provided symbol :param", "for a provided symbol :param symbol: Symbol :return: Item for symbol (invalid if", "# Get currencies from DB self.core.get_data_store().cursor.execute(\"SELECT symbol from currency_symbols\") results = self.core.get_data_store().cursor.fetchall() #", "QStandardItemModel from core import AppCore class CurrencyListModel(QStandardItemModel): \"\"\" Model which fetches all known", "into view for currency in results: symbol = currency[0] item = QStandardItem() item.setText(symbol)", "{} def __init__(self, parent=None): \"\"\" Initialise with optional parent :param parent: Parent object", "and displays them in a list \"\"\" core = AppCore() items = {}", "symbol from currency_symbols\") results = self.core.get_data_store().cursor.fetchall() # Insert list into view for currency", "super().__init__(parent) # Get currencies from DB self.core.get_data_store().cursor.execute(\"SELECT symbol from currency_symbols\") results = self.core.get_data_store().cursor.fetchall()", "if not found) \"\"\" try: item = self.items[symbol] return item.index() finally: return QModelIndex()", ":param parent: Parent object \"\"\" super().__init__(parent) # Get currencies from DB self.core.get_data_store().cursor.execute(\"SELECT symbol", "symbols from the DB and displays them in a list \"\"\" core =", "currencies from DB self.core.get_data_store().cursor.execute(\"SELECT 
symbol from currency_symbols\") results = self.core.get_data_store().cursor.fetchall() # Insert list", "import QStandardItem, QStandardItemModel from core import AppCore class CurrencyListModel(QStandardItemModel): \"\"\" Model which fetches", "symbol): \"\"\" Get the item for a provided symbol :param symbol: Symbol :return:", "results = self.core.get_data_store().cursor.fetchall() # Insert list into view for currency in results: symbol", "found) \"\"\" try: item = self.items[symbol] return item finally: return QStandardItem() def index_for(self,", "items = {} def __init__(self, parent=None): \"\"\" Initialise with optional parent :param parent:", "= AppCore() items = {} def __init__(self, parent=None): \"\"\" Initialise with optional parent", "\"\"\" core = AppCore() items = {} def __init__(self, parent=None): \"\"\" Initialise with", "# Insert list into view for currency in results: symbol = currency[0] item", "\"\"\" Get the index for a provided symbol :param symbol: Symbol :return: Index", "item for a provided symbol :param symbol: Symbol :return: Item for symbol (invalid", "in a list \"\"\" core = AppCore() items = {} def __init__(self, parent=None):", "(invalid if not found) \"\"\" try: item = self.items[symbol] return item finally: return", "list \"\"\" core = AppCore() items = {} def __init__(self, parent=None): \"\"\" Initialise", "provided symbol :param symbol: Symbol :return: Item for symbol (invalid if not found)", "def item_for(self, symbol): \"\"\" Get the item for a provided symbol :param symbol:", "if not found) \"\"\" try: item = self.items[symbol] return item finally: return QStandardItem()", "a provided symbol :param symbol: Symbol :return: Index for symbol (invalid if not", "Index for symbol (invalid if not found) \"\"\" try: item = self.items[symbol] return", "= self.core.get_data_store().cursor.fetchall() # Insert list into view for currency in results: symbol =", "the DB and displays them in a list \"\"\" core = AppCore() items", "from DB 
self.core.get_data_store().cursor.execute(\"SELECT symbol from currency_symbols\") results = self.core.get_data_store().cursor.fetchall() # Insert list into", "self.core.get_data_store().cursor.fetchall() # Insert list into view for currency in results: symbol = currency[0]", "<filename>qt/CurrencyListModel.py from PyQt5.QtCore import QModelIndex from PyQt5.QtGui import QStandardItem, QStandardItemModel from core import", "return item finally: return QStandardItem() def index_for(self, symbol): \"\"\" Get the index for", "class CurrencyListModel(QStandardItemModel): \"\"\" Model which fetches all known symbols from the DB and", "item = QStandardItem() item.setText(symbol) item.setEditable(False) self.items[symbol] = item self.appendRow(item) def item_for(self, symbol): \"\"\"", "symbol): \"\"\" Get the index for a provided symbol :param symbol: Symbol :return:", ":param symbol: Symbol :return: Index for symbol (invalid if not found) \"\"\" try:", ":return: Index for symbol (invalid if not found) \"\"\" try: item = self.items[symbol]", "def index_for(self, symbol): \"\"\" Get the index for a provided symbol :param symbol:", "which fetches all known symbols from the DB and displays them in a", "parent=None): \"\"\" Initialise with optional parent :param parent: Parent object \"\"\" super().__init__(parent) #", "with optional parent :param parent: Parent object \"\"\" super().__init__(parent) # Get currencies from", "from PyQt5.QtCore import QModelIndex from PyQt5.QtGui import QStandardItem, QStandardItemModel from core import AppCore", "from core import AppCore class CurrencyListModel(QStandardItemModel): \"\"\" Model which fetches all known symbols", "Initialise with optional parent :param parent: Parent object \"\"\" super().__init__(parent) # Get currencies", "symbol = currency[0] item = QStandardItem() item.setText(symbol) item.setEditable(False) self.items[symbol] = item self.appendRow(item) def", "all known symbols from the DB and displays them in a list 
\"\"\"", "them in a list \"\"\" core = AppCore() items = {} def __init__(self,", "symbol: Symbol :return: Index for symbol (invalid if not found) \"\"\" try: item", "from currency_symbols\") results = self.core.get_data_store().cursor.fetchall() # Insert list into view for currency in", "symbol (invalid if not found) \"\"\" try: item = self.items[symbol] return item.index() finally:", "currency in results: symbol = currency[0] item = QStandardItem() item.setText(symbol) item.setEditable(False) self.items[symbol] =", "for a provided symbol :param symbol: Symbol :return: Index for symbol (invalid if", "in results: symbol = currency[0] item = QStandardItem() item.setText(symbol) item.setEditable(False) self.items[symbol] = item", "parent: Parent object \"\"\" super().__init__(parent) # Get currencies from DB self.core.get_data_store().cursor.execute(\"SELECT symbol from", "provided symbol :param symbol: Symbol :return: Index for symbol (invalid if not found)" ]
[ "{0}>'.format(self.entitycls) def instances_from_data(self, raw_data, connection): if self.is_collection: return [self.instance_from_data(d, connection) for d in", "self.foreign_key = foreign_key.name else: self.foreign_key = foreign_key def __repr__(self): return u'<NavigationProperty to {0}>'.format(self.entitycls)", "= self._getClass_by_response_type(self.entitycls, raw_data.get('@odata.type')) e = entitycls.__new__(entitycls, from_data=raw_data) es = e.__odata__ es.connection = connection", "needs to be seperated form navproperty entitycls = self._getClass_by_response_type(self.entitycls, raw_data.get('@odata.type')) e = entitycls.__new__(entitycls,", "__init__(self, name, entitycls, collection=False, foreign_key=None, containment=False): from odata.property import PropertyBase self.name = name", "if es.instance_url is None: if self.is_collection: return cache.get('collection', []) return cache.get('single', None) cache_type", "= connection.execute_get(url) instances.extend(self.instances_from_data(raw_data, connection)) return instances def __set__(self, instance, value): \"\"\" :type instance:", "= self._get_parent_cache(instance) if es.instance_url is None: if self.is_collection: return cache.get('collection', []) return cache.get('single',", "# noinspection PyUnresolvedReferences from urllib.parse import urljoin except ImportError: # noinspection PyUnresolvedReferences from", "= connection.execute_get(url) instances = self.instances_from_data(raw_data, connection) while '@odata.nextLink' in raw_data: url = raw_data.get('@odata.nextLink')", "raw_data.get('@odata.type')) e = entitycls.__new__(entitycls, from_data=raw_data) es = e.__odata__ es.connection = connection return e", "for the new Order order.Shipper = my_shipper Service.save(order) \"\"\" try: # noinspection PyUnresolvedReferences", "connection.execute_get(url) instances = self.instances_from_data(raw_data, connection) while '@odata.nextLink' in raw_data: url = raw_data.get('@odata.nextLink') 
raw_data", "ImportError: # noinspection PyUnresolvedReferences from urlparse import urljoin class NavigationProperty(object): \"\"\" A Property-like", "_get_parent_cache(self, instance): es = instance.__odata__ ic = es.nav_cache if self.name not in ic:", "# -*- coding: utf-8 -*- \"\"\" Navigation properties --------------------- The entity can define", "query a shipper instance, just for this example Shipper = Service.entities['Shipper'] my_shipper =", "__repr__(self): return u'<NavigationProperty to {0}>'.format(self.entitycls) def instances_from_data(self, raw_data, connection): if self.is_collection: return [self.instance_from_data(d,", "ic = es.nav_cache if self.name not in ic: cache = {} ic[self.name] =", "raw_data else None def instance_from_data(self, raw_data, connection): # mwa: this needs to be", "form navproperty entitycls = self._getClass_by_response_type(self.entitycls, raw_data.get('@odata.type')) e = entitycls.__new__(entitycls, from_data=raw_data) es = e.__odata__", "_getClass_by_response_type(self, matched_class, odata_type): if not odata_type: return matched_class for subclass in matched_class.__subclasses__(): if", "foreign_key.name else: self.foreign_key = foreign_key def __repr__(self): return u'<NavigationProperty to {0}>'.format(self.entitycls) def instances_from_data(self,", "order.Shipper.CompanyName 'Federal Shipping' When creating new instances, relationships can be assigned via navigation", "odata_type: return matched_class for subclass in matched_class.__subclasses__(): if subclass.__odata_type__ == odata_type[1:]: return self._getClass_by_response_type(subclass,", "= es.connection parent_url = es.instance_url parent_url += '/' url = urljoin(parent_url, self.name) raw_data", "is None: if self.is_collection: return cache.get('collection', []) return cache.get('single', None) cache_type = 'collection'", "def _get_parent_cache(self, instance): es = instance.__odata__ ic = es.nav_cache if self.name not in", "= instance.__odata__ ic = 
es.nav_cache if self.name not in ic: cache = {}", "es.connection parent_url = es.instance_url parent_url += '/' url = urljoin(parent_url, self.name) raw_data =", "cache = ic[self.name] return cache def _get_instances_from_server(self, instance): es = instance.__odata__ connection =", "code-block:: python # query a shipper instance, just for this example Shipper =", "= name self.entitycls = entitycls self.is_collection = collection self.is_containment = containment if isinstance(foreign_key,", "self.is_containment = containment if isinstance(foreign_key, PropertyBase): self.foreign_key = foreign_key.name else: self.foreign_key = foreign_key", "other entities. These are known as navigation properties and are supported in this", "es = instance.__odata__ cache = self._get_parent_cache(instance) if es.instance_url is None: if self.is_collection: return", "-*- \"\"\" Navigation properties --------------------- The entity can define properties that link to", "{} ic[self.name] = cache else: cache = ic[self.name] return cache def _get_instances_from_server(self, instance):", "return u'<NavigationProperty to {0}>'.format(self.entitycls) def instances_from_data(self, raw_data, connection): if self.is_collection: return [self.instance_from_data(d, connection)", "if not odata_type: return matched_class for subclass in matched_class.__subclasses__(): if subclass.__odata_type__ == odata_type[1:]:", "my_shipper = Service.query(Shipper).first() # assign for the new Order order.Shipper = my_shipper Service.save(order)", "instance): es = instance.__odata__ ic = es.nav_cache if self.name not in ic: cache", "cache['single'] = value instance.__odata__.set_property_dirty(self) def __get__(self, instance, owner): \"\"\" :type instance: odata.entity.EntityBase \"\"\"", "via navigation properties: .. 
code-block:: python # query a shipper instance, just for", "= cache else: cache = ic[self.name] return cache def _get_instances_from_server(self, instance): es =", "instance: odata.entity.EntityBase \"\"\" cache = self._get_parent_cache(instance) if self.is_collection: cache['collection'] = value else: cache['single']", "= collection self.is_containment = containment if isinstance(foreign_key, PropertyBase): self.foreign_key = foreign_key.name else: self.foreign_key", "= instance.__odata__ connection = es.connection parent_url = es.instance_url parent_url += '/' url =", "instances.extend(self.instances_from_data(raw_data, connection)) return instances def __set__(self, instance, value): \"\"\" :type instance: odata.entity.EntityBase \"\"\"", "return self.instance_from_data(raw_data, connection) if raw_data else None def instance_from_data(self, raw_data, connection): # mwa:", "= 'collection' if self.is_collection else 'single' try: return cache[cache_type] except KeyError: cache[cache_type] =", "relationships can be assigned via navigation properties: .. code-block:: python # query a", "cache['collection'] = value else: cache['single'] = value instance.__odata__.set_property_dirty(self) def __get__(self, instance, owner): \"\"\"", "Service.entities['Shipper'] my_shipper = Service.query(Shipper).first() # assign for the new Order order.Shipper = my_shipper", "__set__(self, instance, value): \"\"\" :type instance: odata.entity.EntityBase \"\"\" cache = self._get_parent_cache(instance) if self.is_collection:", "e def _getClass_by_response_type(self, matched_class, odata_type): if not odata_type: return matched_class for subclass in", "None) cache_type = 'collection' if self.is_collection else 'single' try: return cache[cache_type] except KeyError:", "The entity can define properties that link to other entities. 
These are known", "[] else: return self.instance_from_data(raw_data, connection) if raw_data else None def instance_from_data(self, raw_data, connection):", "es = instance.__odata__ connection = es.connection parent_url = es.instance_url parent_url += '/' url", "parent_url = es.instance_url parent_url += '/' url = urljoin(parent_url, self.name) raw_data = connection.execute_get(url)", "odata.entity.EntityBase \"\"\" cache = self._get_parent_cache(instance) if self.is_collection: cache['collection'] = value else: cache['single'] =", "this library. .. code-block:: python >>> order = Service.query(Order).first() >>> order.Shipper <Entity(Shipper:3)> >>>", "= Service.query(Order).first() >>> order.Shipper <Entity(Shipper:3)> >>> order.Shipper.CompanyName 'Federal Shipping' When creating new instances,", "These are known as navigation properties and are supported in this library. ..", "= entitycls.__new__(entitycls, from_data=raw_data) es = e.__odata__ es.connection = connection return e def _getClass_by_response_type(self,", "new Order order.Shipper = my_shipper Service.save(order) \"\"\" try: # noinspection PyUnresolvedReferences from urllib.parse", "entitycls.__new__(entitycls, from_data=raw_data) es = e.__odata__ es.connection = connection return e def _getClass_by_response_type(self, matched_class,", "return cache.get('single', None) cache_type = 'collection' if self.is_collection else 'single' try: return cache[cache_type]", "ic: cache = {} ic[self.name] = cache else: cache = ic[self.name] return cache", "value): \"\"\" :type instance: odata.entity.EntityBase \"\"\" cache = self._get_parent_cache(instance) if self.is_collection: cache['collection'] =", "for marking relationships between entities, but does not inherit from PropertyBase. 
\"\"\" def", "cache.get('collection', []) return cache.get('single', None) cache_type = 'collection' if self.is_collection else 'single' try:", "instance.__odata__.set_property_dirty(self) def __get__(self, instance, owner): \"\"\" :type instance: odata.entity.EntityBase \"\"\" if instance is", "name, entitycls, collection=False, foreign_key=None, containment=False): from odata.property import PropertyBase self.name = name self.entitycls", "def _get_instances_from_server(self, instance): es = instance.__odata__ connection = es.connection parent_url = es.instance_url parent_url", "\"\"\" cache = self._get_parent_cache(instance) if self.is_collection: cache['collection'] = value else: cache['single'] = value", "= es.nav_cache if self.name not in ic: cache = {} ic[self.name] = cache", ":type instance: odata.entity.EntityBase \"\"\" if instance is None: return self es = instance.__odata__", "subclass.__odata_type__ == odata_type[1:]: return self._getClass_by_response_type(subclass, odata_type) return matched_class def _get_parent_cache(self, instance): es =", "<Entity(Shipper:3)> >>> order.Shipper.CompanyName 'Federal Shipping' When creating new instances, relationships can be assigned", "entity can define properties that link to other entities. These are known as", "mwa: this needs to be seperated form navproperty entitycls = self._getClass_by_response_type(self.entitycls, raw_data.get('@odata.type')) e", "self.is_collection = collection self.is_containment = containment if isinstance(foreign_key, PropertyBase): self.foreign_key = foreign_key.name else:", "entities. 
These are known as navigation properties and are supported in this library.", "instances = self.instances_from_data(raw_data, connection) while '@odata.nextLink' in raw_data: url = raw_data.get('@odata.nextLink') raw_data =", "if instance is None: return self es = instance.__odata__ cache = self._get_parent_cache(instance) if", "es.instance_url is None: if self.is_collection: return cache.get('collection', []) return cache.get('single', None) cache_type =", "connection) for d in raw_data['value']] if raw_data['value'] else [] else: return self.instance_from_data(raw_data, connection)", "\"\"\" :type instance: odata.entity.EntityBase \"\"\" cache = self._get_parent_cache(instance) if self.is_collection: cache['collection'] = value", "link to other entities. These are known as navigation properties and are supported", "Property-like object for marking relationships between entities, but does not inherit from PropertyBase.", "connection) while '@odata.nextLink' in raw_data: url = raw_data.get('@odata.nextLink') raw_data = connection.execute_get(url) instances.extend(self.instances_from_data(raw_data, connection))", "return self._getClass_by_response_type(subclass, odata_type) return matched_class def _get_parent_cache(self, instance): es = instance.__odata__ ic =", "this example Shipper = Service.entities['Shipper'] my_shipper = Service.query(Shipper).first() # assign for the new", "odata.property import PropertyBase self.name = name self.entitycls = entitycls self.is_collection = collection self.is_containment", "matched_class, odata_type): if not odata_type: return matched_class for subclass in matched_class.__subclasses__(): if subclass.__odata_type__", "raw_data.get('@odata.nextLink') raw_data = connection.execute_get(url) instances.extend(self.instances_from_data(raw_data, connection)) return instances def __set__(self, instance, value): \"\"\"", "u'<NavigationProperty to {0}>'.format(self.entitycls) def instances_from_data(self, raw_data, connection): if 
self.is_collection: return [self.instance_from_data(d, connection) for", "= e.__odata__ es.connection = connection return e def _getClass_by_response_type(self, matched_class, odata_type): if not", "does not inherit from PropertyBase. \"\"\" def __init__(self, name, entitycls, collection=False, foreign_key=None, containment=False):", "\"\"\" if instance is None: return self es = instance.__odata__ cache = self._get_parent_cache(instance)", "raw_data, connection): if self.is_collection: return [self.instance_from_data(d, connection) for d in raw_data['value']] if raw_data['value']", "known as navigation properties and are supported in this library. .. code-block:: python", "instance_from_data(self, raw_data, connection): # mwa: this needs to be seperated form navproperty entitycls", "Navigation properties --------------------- The entity can define properties that link to other entities.", "shipper instance, just for this example Shipper = Service.entities['Shipper'] my_shipper = Service.query(Shipper).first() #", "odata_type): if not odata_type: return matched_class for subclass in matched_class.__subclasses__(): if subclass.__odata_type__ ==", "assigned via navigation properties: .. code-block:: python # query a shipper instance, just", "cache = self._get_parent_cache(instance) if self.is_collection: cache['collection'] = value else: cache['single'] = value instance.__odata__.set_property_dirty(self)", "new instances, relationships can be assigned via navigation properties: .. code-block:: python #", "= value else: cache['single'] = value instance.__odata__.set_property_dirty(self) def __get__(self, instance, owner): \"\"\" :type", "code-block:: python >>> order = Service.query(Order).first() >>> order.Shipper <Entity(Shipper:3)> >>> order.Shipper.CompanyName 'Federal Shipping'", "navigation properties and are supported in this library. .. 
code-block:: python >>> order", "connection)) return instances def __set__(self, instance, value): \"\"\" :type instance: odata.entity.EntityBase \"\"\" cache", "are supported in this library. .. code-block:: python >>> order = Service.query(Order).first() >>>", "raw_data: url = raw_data.get('@odata.nextLink') raw_data = connection.execute_get(url) instances.extend(self.instances_from_data(raw_data, connection)) return instances def __set__(self,", "return cache.get('collection', []) return cache.get('single', None) cache_type = 'collection' if self.is_collection else 'single'", "# noinspection PyUnresolvedReferences from urlparse import urljoin class NavigationProperty(object): \"\"\" A Property-like object", "that link to other entities. These are known as navigation properties and are", "== odata_type[1:]: return self._getClass_by_response_type(subclass, odata_type) return matched_class def _get_parent_cache(self, instance): es = instance.__odata__", "assign for the new Order order.Shipper = my_shipper Service.save(order) \"\"\" try: # noinspection", "if subclass.__odata_type__ == odata_type[1:]: return self._getClass_by_response_type(subclass, odata_type) return matched_class def _get_parent_cache(self, instance): es", "'@odata.nextLink' in raw_data: url = raw_data.get('@odata.nextLink') raw_data = connection.execute_get(url) instances.extend(self.instances_from_data(raw_data, connection)) return instances", "def __init__(self, name, entitycls, collection=False, foreign_key=None, containment=False): from odata.property import PropertyBase self.name =", "instance: odata.entity.EntityBase \"\"\" if instance is None: return self es = instance.__odata__ cache", "import urljoin class NavigationProperty(object): \"\"\" A Property-like object for marking relationships between entities,", "PyUnresolvedReferences from urlparse import urljoin class NavigationProperty(object): \"\"\" A Property-like object for marking", "url = urljoin(parent_url, self.name) raw_data = 
connection.execute_get(url) instances = self.instances_from_data(raw_data, connection) while '@odata.nextLink'", "'/' url = urljoin(parent_url, self.name) raw_data = connection.execute_get(url) instances = self.instances_from_data(raw_data, connection) while", "Service.query(Shipper).first() # assign for the new Order order.Shipper = my_shipper Service.save(order) \"\"\" try:", "coding: utf-8 -*- \"\"\" Navigation properties --------------------- The entity can define properties that", "= self._get_parent_cache(instance) if self.is_collection: cache['collection'] = value else: cache['single'] = value instance.__odata__.set_property_dirty(self) def", "subclass in matched_class.__subclasses__(): if subclass.__odata_type__ == odata_type[1:]: return self._getClass_by_response_type(subclass, odata_type) return matched_class def", "cache.get('single', None) cache_type = 'collection' if self.is_collection else 'single' try: return cache[cache_type] except", "except ImportError: # noinspection PyUnresolvedReferences from urlparse import urljoin class NavigationProperty(object): \"\"\" A", "= my_shipper Service.save(order) \"\"\" try: # noinspection PyUnresolvedReferences from urllib.parse import urljoin except", "this needs to be seperated form navproperty entitycls = self._getClass_by_response_type(self.entitycls, raw_data.get('@odata.type')) e =", "_get_instances_from_server(self, instance): es = instance.__odata__ connection = es.connection parent_url = es.instance_url parent_url +=", "return self es = instance.__odata__ cache = self._get_parent_cache(instance) if es.instance_url is None: if", "to other entities. These are known as navigation properties and are supported in", "marking relationships between entities, but does not inherit from PropertyBase. 
\"\"\" def __init__(self,", "Shipper = Service.entities['Shipper'] my_shipper = Service.query(Shipper).first() # assign for the new Order order.Shipper", "import PropertyBase self.name = name self.entitycls = entitycls self.is_collection = collection self.is_containment =", ".. code-block:: python >>> order = Service.query(Order).first() >>> order.Shipper <Entity(Shipper:3)> >>> order.Shipper.CompanyName 'Federal", "not inherit from PropertyBase. \"\"\" def __init__(self, name, entitycls, collection=False, foreign_key=None, containment=False): from", "python # query a shipper instance, just for this example Shipper = Service.entities['Shipper']", "for d in raw_data['value']] if raw_data['value'] else [] else: return self.instance_from_data(raw_data, connection) if", "else: cache = ic[self.name] return cache def _get_instances_from_server(self, instance): es = instance.__odata__ connection", "= Service.query(Shipper).first() # assign for the new Order order.Shipper = my_shipper Service.save(order) \"\"\"", "[]) return cache.get('single', None) cache_type = 'collection' if self.is_collection else 'single' try: return", "= foreign_key def __repr__(self): return u'<NavigationProperty to {0}>'.format(self.entitycls) def instances_from_data(self, raw_data, connection): if", "\"\"\" A Property-like object for marking relationships between entities, but does not inherit", "instance, owner): \"\"\" :type instance: odata.entity.EntityBase \"\"\" if instance is None: return self", "self.name = name self.entitycls = entitycls self.is_collection = collection self.is_containment = containment if", "connection) if raw_data else None def instance_from_data(self, raw_data, connection): # mwa: this needs", "supported in this library. .. code-block:: python >>> order = Service.query(Order).first() >>> order.Shipper", "in this library. .. 
code-block:: python >>> order = Service.query(Order).first() >>> order.Shipper <Entity(Shipper:3)>", "return e def _getClass_by_response_type(self, matched_class, odata_type): if not odata_type: return matched_class for subclass", "if self.is_collection: return cache.get('collection', []) return cache.get('single', None) cache_type = 'collection' if self.is_collection", "= {} ic[self.name] = cache else: cache = ic[self.name] return cache def _get_instances_from_server(self,", "a shipper instance, just for this example Shipper = Service.entities['Shipper'] my_shipper = Service.query(Shipper).first()", "connection = es.connection parent_url = es.instance_url parent_url += '/' url = urljoin(parent_url, self.name)", "properties that link to other entities. These are known as navigation properties and", "navigation properties: .. code-block:: python # query a shipper instance, just for this", "properties: .. code-block:: python # query a shipper instance, just for this example", "owner): \"\"\" :type instance: odata.entity.EntityBase \"\"\" if instance is None: return self es", "name self.entitycls = entitycls self.is_collection = collection self.is_containment = containment if isinstance(foreign_key, PropertyBase):", "while '@odata.nextLink' in raw_data: url = raw_data.get('@odata.nextLink') raw_data = connection.execute_get(url) instances.extend(self.instances_from_data(raw_data, connection)) return", "containment=False): from odata.property import PropertyBase self.name = name self.entitycls = entitycls self.is_collection =", "if self.name not in ic: cache = {} ic[self.name] = cache else: cache", "parent_url += '/' url = urljoin(parent_url, self.name) raw_data = connection.execute_get(url) instances = self.instances_from_data(raw_data,", "= connection return e def _getClass_by_response_type(self, matched_class, odata_type): if not odata_type: return matched_class", "urljoin except ImportError: # noinspection PyUnresolvedReferences from urlparse import urljoin class 
NavigationProperty(object): \"\"\"", "Order order.Shipper = my_shipper Service.save(order) \"\"\" try: # noinspection PyUnresolvedReferences from urllib.parse import", "None: return self es = instance.__odata__ cache = self._get_parent_cache(instance) if es.instance_url is None:", "matched_class def _get_parent_cache(self, instance): es = instance.__odata__ ic = es.nav_cache if self.name not", "PropertyBase. \"\"\" def __init__(self, name, entitycls, collection=False, foreign_key=None, containment=False): from odata.property import PropertyBase", "ic[self.name] = cache else: cache = ic[self.name] return cache def _get_instances_from_server(self, instance): es", "is None: return self es = instance.__odata__ cache = self._get_parent_cache(instance) if es.instance_url is", "as navigation properties and are supported in this library. .. code-block:: python >>>", "e = entitycls.__new__(entitycls, from_data=raw_data) es = e.__odata__ es.connection = connection return e def", "relationships between entities, but does not inherit from PropertyBase. \"\"\" def __init__(self, name,", "my_shipper Service.save(order) \"\"\" try: # noinspection PyUnresolvedReferences from urllib.parse import urljoin except ImportError:", "creating new instances, relationships can be assigned via navigation properties: .. 
code-block:: python", "self.name) raw_data = connection.execute_get(url) instances = self.instances_from_data(raw_data, connection) while '@odata.nextLink' in raw_data: url", "es = e.__odata__ es.connection = connection return e def _getClass_by_response_type(self, matched_class, odata_type): if", "PropertyBase self.name = name self.entitycls = entitycls self.is_collection = collection self.is_containment = containment", "raw_data = connection.execute_get(url) instances = self.instances_from_data(raw_data, connection) while '@odata.nextLink' in raw_data: url =", "\"\"\" Navigation properties --------------------- The entity can define properties that link to other", "cache_type = 'collection' if self.is_collection else 'single' try: return cache[cache_type] except KeyError: cache[cache_type]", "self.is_collection else 'single' try: return cache[cache_type] except KeyError: cache[cache_type] = self._get_instances_from_server(instance) return cache[cache_type]", "from_data=raw_data) es = e.__odata__ es.connection = connection return e def _getClass_by_response_type(self, matched_class, odata_type):", "es = instance.__odata__ ic = es.nav_cache if self.name not in ic: cache =", "def __repr__(self): return u'<NavigationProperty to {0}>'.format(self.entitycls) def instances_from_data(self, raw_data, connection): if self.is_collection: return", "if self.is_collection: cache['collection'] = value else: cache['single'] = value instance.__odata__.set_property_dirty(self) def __get__(self, instance,", "properties and are supported in this library. .. 
code-block:: python >>> order =", "import urljoin except ImportError: # noinspection PyUnresolvedReferences from urlparse import urljoin class NavigationProperty(object):", "order.Shipper = my_shipper Service.save(order) \"\"\" try: # noinspection PyUnresolvedReferences from urllib.parse import urljoin", "self.is_collection: return [self.instance_from_data(d, connection) for d in raw_data['value']] if raw_data['value'] else [] else:", "just for this example Shipper = Service.entities['Shipper'] my_shipper = Service.query(Shipper).first() # assign for", ".. code-block:: python # query a shipper instance, just for this example Shipper", "connection): # mwa: this needs to be seperated form navproperty entitycls = self._getClass_by_response_type(self.entitycls,", "isinstance(foreign_key, PropertyBase): self.foreign_key = foreign_key.name else: self.foreign_key = foreign_key def __repr__(self): return u'<NavigationProperty", "raw_data['value']] if raw_data['value'] else [] else: return self.instance_from_data(raw_data, connection) if raw_data else None", "self._getClass_by_response_type(subclass, odata_type) return matched_class def _get_parent_cache(self, instance): es = instance.__odata__ ic = es.nav_cache", "example Shipper = Service.entities['Shipper'] my_shipper = Service.query(Shipper).first() # assign for the new Order", "NavigationProperty(object): \"\"\" A Property-like object for marking relationships between entities, but does not", "define properties that link to other entities. 
These are known as navigation properties", "collection self.is_containment = containment if isinstance(foreign_key, PropertyBase): self.foreign_key = foreign_key.name else: self.foreign_key =", "instance.__odata__ ic = es.nav_cache if self.name not in ic: cache = {} ic[self.name]", "\"\"\" def __init__(self, name, entitycls, collection=False, foreign_key=None, containment=False): from odata.property import PropertyBase self.name", "__get__(self, instance, owner): \"\"\" :type instance: odata.entity.EntityBase \"\"\" if instance is None: return", "= Service.entities['Shipper'] my_shipper = Service.query(Shipper).first() # assign for the new Order order.Shipper =", "= urljoin(parent_url, self.name) raw_data = connection.execute_get(url) instances = self.instances_from_data(raw_data, connection) while '@odata.nextLink' in", "self.is_collection: return cache.get('collection', []) return cache.get('single', None) cache_type = 'collection' if self.is_collection else", "d in raw_data['value']] if raw_data['value'] else [] else: return self.instance_from_data(raw_data, connection) if raw_data", "foreign_key=None, containment=False): from odata.property import PropertyBase self.name = name self.entitycls = entitycls self.is_collection", "but does not inherit from PropertyBase. \"\"\" def __init__(self, name, entitycls, collection=False, foreign_key=None,", "self.entitycls = entitycls self.is_collection = collection self.is_containment = containment if isinstance(foreign_key, PropertyBase): self.foreign_key", "seperated form navproperty entitycls = self._getClass_by_response_type(self.entitycls, raw_data.get('@odata.type')) e = entitycls.__new__(entitycls, from_data=raw_data) es =", "+= '/' url = urljoin(parent_url, self.name) raw_data = connection.execute_get(url) instances = self.instances_from_data(raw_data, connection)", "are known as navigation properties and are supported in this library. .. 
code-block::", "PropertyBase): self.foreign_key = foreign_key.name else: self.foreign_key = foreign_key def __repr__(self): return u'<NavigationProperty to", "return matched_class for subclass in matched_class.__subclasses__(): if subclass.__odata_type__ == odata_type[1:]: return self._getClass_by_response_type(subclass, odata_type)", "if raw_data else None def instance_from_data(self, raw_data, connection): # mwa: this needs to", ">>> order = Service.query(Order).first() >>> order.Shipper <Entity(Shipper:3)> >>> order.Shipper.CompanyName 'Federal Shipping' When creating", "urljoin(parent_url, self.name) raw_data = connection.execute_get(url) instances = self.instances_from_data(raw_data, connection) while '@odata.nextLink' in raw_data:", "instance): es = instance.__odata__ connection = es.connection parent_url = es.instance_url parent_url += '/'", "instance.__odata__ connection = es.connection parent_url = es.instance_url parent_url += '/' url = urljoin(parent_url,", "'collection' if self.is_collection else 'single' try: return cache[cache_type] except KeyError: cache[cache_type] = self._get_instances_from_server(instance)", "instance.__odata__ cache = self._get_parent_cache(instance) if es.instance_url is None: if self.is_collection: return cache.get('collection', [])", "not odata_type: return matched_class for subclass in matched_class.__subclasses__(): if subclass.__odata_type__ == odata_type[1:]: return", "be assigned via navigation properties: .. code-block:: python # query a shipper instance,", "else [] else: return self.instance_from_data(raw_data, connection) if raw_data else None def instance_from_data(self, raw_data,", "def __get__(self, instance, owner): \"\"\" :type instance: odata.entity.EntityBase \"\"\" if instance is None:", "in raw_data['value']] if raw_data['value'] else [] else: return self.instance_from_data(raw_data, connection) if raw_data else", "can be assigned via navigation properties: .. 
code-block:: python # query a shipper", "= ic[self.name] return cache def _get_instances_from_server(self, instance): es = instance.__odata__ connection = es.connection", "instances def __set__(self, instance, value): \"\"\" :type instance: odata.entity.EntityBase \"\"\" cache = self._get_parent_cache(instance)", "return cache def _get_instances_from_server(self, instance): es = instance.__odata__ connection = es.connection parent_url =", "library. .. code-block:: python >>> order = Service.query(Order).first() >>> order.Shipper <Entity(Shipper:3)> >>> order.Shipper.CompanyName", ">>> order.Shipper <Entity(Shipper:3)> >>> order.Shipper.CompanyName 'Federal Shipping' When creating new instances, relationships can", "can define properties that link to other entities. These are known as navigation", "the new Order order.Shipper = my_shipper Service.save(order) \"\"\" try: # noinspection PyUnresolvedReferences from", "else None def instance_from_data(self, raw_data, connection): # mwa: this needs to be seperated", "self.name not in ic: cache = {} ic[self.name] = cache else: cache =", "= self.instances_from_data(raw_data, connection) while '@odata.nextLink' in raw_data: url = raw_data.get('@odata.nextLink') raw_data = connection.execute_get(url)", "in raw_data: url = raw_data.get('@odata.nextLink') raw_data = connection.execute_get(url) instances.extend(self.instances_from_data(raw_data, connection)) return instances def", "urllib.parse import urljoin except ImportError: # noinspection PyUnresolvedReferences from urlparse import urljoin class", "utf-8 -*- \"\"\" Navigation properties --------------------- The entity can define properties that link", "# assign for the new Order order.Shipper = my_shipper Service.save(order) \"\"\" try: #", "urlparse import urljoin class NavigationProperty(object): \"\"\" A Property-like object for marking relationships between", "noinspection PyUnresolvedReferences from urlparse import urljoin class NavigationProperty(object): \"\"\" A 
Property-like object for", "noinspection PyUnresolvedReferences from urllib.parse import urljoin except ImportError: # noinspection PyUnresolvedReferences from urlparse", "return [self.instance_from_data(d, connection) for d in raw_data['value']] if raw_data['value'] else [] else: return", "self.instance_from_data(raw_data, connection) if raw_data else None def instance_from_data(self, raw_data, connection): # mwa: this", "= es.instance_url parent_url += '/' url = urljoin(parent_url, self.name) raw_data = connection.execute_get(url) instances", "es.instance_url parent_url += '/' url = urljoin(parent_url, self.name) raw_data = connection.execute_get(url) instances =", "raw_data = connection.execute_get(url) instances.extend(self.instances_from_data(raw_data, connection)) return instances def __set__(self, instance, value): \"\"\" :type", "order.Shipper <Entity(Shipper:3)> >>> order.Shipper.CompanyName 'Federal Shipping' When creating new instances, relationships can be", "e.__odata__ es.connection = connection return e def _getClass_by_response_type(self, matched_class, odata_type): if not odata_type:", "else: return self.instance_from_data(raw_data, connection) if raw_data else None def instance_from_data(self, raw_data, connection): #", "matched_class.__subclasses__(): if subclass.__odata_type__ == odata_type[1:]: return self._getClass_by_response_type(subclass, odata_type) return matched_class def _get_parent_cache(self, instance):", "= entitycls self.is_collection = collection self.is_containment = containment if isinstance(foreign_key, PropertyBase): self.foreign_key =", "None def instance_from_data(self, raw_data, connection): # mwa: this needs to be seperated form", "value instance.__odata__.set_property_dirty(self) def __get__(self, instance, owner): \"\"\" :type instance: odata.entity.EntityBase \"\"\" if instance", "A Property-like object for marking relationships between entities, but does not inherit from", "-*- coding: utf-8 -*- \"\"\" Navigation 
properties --------------------- The entity can define properties", "return matched_class def _get_parent_cache(self, instance): es = instance.__odata__ ic = es.nav_cache if self.name", "es.connection = connection return e def _getClass_by_response_type(self, matched_class, odata_type): if not odata_type: return", "\"\"\" :type instance: odata.entity.EntityBase \"\"\" if instance is None: return self es =", "connection.execute_get(url) instances.extend(self.instances_from_data(raw_data, connection)) return instances def __set__(self, instance, value): \"\"\" :type instance: odata.entity.EntityBase", "if isinstance(foreign_key, PropertyBase): self.foreign_key = foreign_key.name else: self.foreign_key = foreign_key def __repr__(self): return", "connection return e def _getClass_by_response_type(self, matched_class, odata_type): if not odata_type: return matched_class for", "for this example Shipper = Service.entities['Shipper'] my_shipper = Service.query(Shipper).first() # assign for the", "cache def _get_instances_from_server(self, instance): es = instance.__odata__ connection = es.connection parent_url = es.instance_url", "'Federal Shipping' When creating new instances, relationships can be assigned via navigation properties:", "self._getClass_by_response_type(self.entitycls, raw_data.get('@odata.type')) e = entitycls.__new__(entitycls, from_data=raw_data) es = e.__odata__ es.connection = connection return", "instance, value): \"\"\" :type instance: odata.entity.EntityBase \"\"\" cache = self._get_parent_cache(instance) if self.is_collection: cache['collection']", "from PropertyBase. 
\"\"\" def __init__(self, name, entitycls, collection=False, foreign_key=None, containment=False): from odata.property import", "odata.entity.EntityBase \"\"\" if instance is None: return self es = instance.__odata__ cache =", "def instances_from_data(self, raw_data, connection): if self.is_collection: return [self.instance_from_data(d, connection) for d in raw_data['value']]", "try: # noinspection PyUnresolvedReferences from urllib.parse import urljoin except ImportError: # noinspection PyUnresolvedReferences", "url = raw_data.get('@odata.nextLink') raw_data = connection.execute_get(url) instances.extend(self.instances_from_data(raw_data, connection)) return instances def __set__(self, instance,", "if raw_data['value'] else [] else: return self.instance_from_data(raw_data, connection) if raw_data else None def", "self.foreign_key = foreign_key def __repr__(self): return u'<NavigationProperty to {0}>'.format(self.entitycls) def instances_from_data(self, raw_data, connection):", "class NavigationProperty(object): \"\"\" A Property-like object for marking relationships between entities, but does", "es.nav_cache if self.name not in ic: cache = {} ic[self.name] = cache else:", ":type instance: odata.entity.EntityBase \"\"\" cache = self._get_parent_cache(instance) if self.is_collection: cache['collection'] = value else:", "[self.instance_from_data(d, connection) for d in raw_data['value']] if raw_data['value'] else [] else: return self.instance_from_data(raw_data,", "collection=False, foreign_key=None, containment=False): from odata.property import PropertyBase self.name = name self.entitycls = entitycls", "in ic: cache = {} ic[self.name] = cache else: cache = ic[self.name] return", "navproperty entitycls = self._getClass_by_response_type(self.entitycls, raw_data.get('@odata.type')) e = entitycls.__new__(entitycls, from_data=raw_data) es = e.__odata__ es.connection", "ic[self.name] return cache def _get_instances_from_server(self, instance): es = instance.__odata__ 
connection = es.connection parent_url", "self._get_parent_cache(instance) if self.is_collection: cache['collection'] = value else: cache['single'] = value instance.__odata__.set_property_dirty(self) def __get__(self,", "cache = self._get_parent_cache(instance) if es.instance_url is None: if self.is_collection: return cache.get('collection', []) return", "raw_data, connection): # mwa: this needs to be seperated form navproperty entitycls =", "be seperated form navproperty entitycls = self._getClass_by_response_type(self.entitycls, raw_data.get('@odata.type')) e = entitycls.__new__(entitycls, from_data=raw_data) es", "entitycls, collection=False, foreign_key=None, containment=False): from odata.property import PropertyBase self.name = name self.entitycls =", "def __set__(self, instance, value): \"\"\" :type instance: odata.entity.EntityBase \"\"\" cache = self._get_parent_cache(instance) if", "\"\"\" try: # noinspection PyUnresolvedReferences from urllib.parse import urljoin except ImportError: # noinspection", "self es = instance.__odata__ cache = self._get_parent_cache(instance) if es.instance_url is None: if self.is_collection:", "from odata.property import PropertyBase self.name = name self.entitycls = entitycls self.is_collection = collection", "# mwa: this needs to be seperated form navproperty entitycls = self._getClass_by_response_type(self.entitycls, raw_data.get('@odata.type'))", "odata_type) return matched_class def _get_parent_cache(self, instance): es = instance.__odata__ ic = es.nav_cache if", "inherit from PropertyBase. 
\"\"\" def __init__(self, name, entitycls, collection=False, foreign_key=None, containment=False): from odata.property", "to {0}>'.format(self.entitycls) def instances_from_data(self, raw_data, connection): if self.is_collection: return [self.instance_from_data(d, connection) for d", "instance, just for this example Shipper = Service.entities['Shipper'] my_shipper = Service.query(Shipper).first() # assign", "= foreign_key.name else: self.foreign_key = foreign_key def __repr__(self): return u'<NavigationProperty to {0}>'.format(self.entitycls) def", "else: cache['single'] = value instance.__odata__.set_property_dirty(self) def __get__(self, instance, owner): \"\"\" :type instance: odata.entity.EntityBase", "instance is None: return self es = instance.__odata__ cache = self._get_parent_cache(instance) if es.instance_url", "= raw_data.get('@odata.nextLink') raw_data = connection.execute_get(url) instances.extend(self.instances_from_data(raw_data, connection)) return instances def __set__(self, instance, value):", "connection): if self.is_collection: return [self.instance_from_data(d, connection) for d in raw_data['value']] if raw_data['value'] else", "When creating new instances, relationships can be assigned via navigation properties: .. code-block::", "cache = {} ic[self.name] = cache else: cache = ic[self.name] return cache def", "Service.save(order) \"\"\" try: # noinspection PyUnresolvedReferences from urllib.parse import urljoin except ImportError: #", "entities, but does not inherit from PropertyBase. 
\"\"\" def __init__(self, name, entitycls, collection=False,", "order = Service.query(Order).first() >>> order.Shipper <Entity(Shipper:3)> >>> order.Shipper.CompanyName 'Federal Shipping' When creating new", "in matched_class.__subclasses__(): if subclass.__odata_type__ == odata_type[1:]: return self._getClass_by_response_type(subclass, odata_type) return matched_class def _get_parent_cache(self,", "entitycls self.is_collection = collection self.is_containment = containment if isinstance(foreign_key, PropertyBase): self.foreign_key = foreign_key.name", "to be seperated form navproperty entitycls = self._getClass_by_response_type(self.entitycls, raw_data.get('@odata.type')) e = entitycls.__new__(entitycls, from_data=raw_data)", "def instance_from_data(self, raw_data, connection): # mwa: this needs to be seperated form navproperty", "value else: cache['single'] = value instance.__odata__.set_property_dirty(self) def __get__(self, instance, owner): \"\"\" :type instance:", "= instance.__odata__ cache = self._get_parent_cache(instance) if es.instance_url is None: if self.is_collection: return cache.get('collection',", "def _getClass_by_response_type(self, matched_class, odata_type): if not odata_type: return matched_class for subclass in matched_class.__subclasses__():", "properties --------------------- The entity can define properties that link to other entities. These", "--------------------- The entity can define properties that link to other entities. 
These are", "self.is_collection: cache['collection'] = value else: cache['single'] = value instance.__odata__.set_property_dirty(self) def __get__(self, instance, owner):", "Service.query(Order).first() >>> order.Shipper <Entity(Shipper:3)> >>> order.Shipper.CompanyName 'Federal Shipping' When creating new instances, relationships", "odata_type[1:]: return self._getClass_by_response_type(subclass, odata_type) return matched_class def _get_parent_cache(self, instance): es = instance.__odata__ ic", "= value instance.__odata__.set_property_dirty(self) def __get__(self, instance, owner): \"\"\" :type instance: odata.entity.EntityBase \"\"\" if", "return instances def __set__(self, instance, value): \"\"\" :type instance: odata.entity.EntityBase \"\"\" cache =", "and are supported in this library. .. code-block:: python >>> order = Service.query(Order).first()", "# query a shipper instance, just for this example Shipper = Service.entities['Shipper'] my_shipper", "= containment if isinstance(foreign_key, PropertyBase): self.foreign_key = foreign_key.name else: self.foreign_key = foreign_key def", "None: if self.is_collection: return cache.get('collection', []) return cache.get('single', None) cache_type = 'collection' if", "instances, relationships can be assigned via navigation properties: .. 
code-block:: python # query", "urljoin class NavigationProperty(object): \"\"\" A Property-like object for marking relationships between entities, but", "foreign_key def __repr__(self): return u'<NavigationProperty to {0}>'.format(self.entitycls) def instances_from_data(self, raw_data, connection): if self.is_collection:", "self.instances_from_data(raw_data, connection) while '@odata.nextLink' in raw_data: url = raw_data.get('@odata.nextLink') raw_data = connection.execute_get(url) instances.extend(self.instances_from_data(raw_data,", "PyUnresolvedReferences from urllib.parse import urljoin except ImportError: # noinspection PyUnresolvedReferences from urlparse import", "object for marking relationships between entities, but does not inherit from PropertyBase. \"\"\"", "from urlparse import urljoin class NavigationProperty(object): \"\"\" A Property-like object for marking relationships", "cache else: cache = ic[self.name] return cache def _get_instances_from_server(self, instance): es = instance.__odata__", "instances_from_data(self, raw_data, connection): if self.is_collection: return [self.instance_from_data(d, connection) for d in raw_data['value']] if", ">>> order.Shipper.CompanyName 'Federal Shipping' When creating new instances, relationships can be assigned via", "entitycls = self._getClass_by_response_type(self.entitycls, raw_data.get('@odata.type')) e = entitycls.__new__(entitycls, from_data=raw_data) es = e.__odata__ es.connection =", "raw_data['value'] else [] else: return self.instance_from_data(raw_data, connection) if raw_data else None def instance_from_data(self,", "from urllib.parse import urljoin except ImportError: # noinspection PyUnresolvedReferences from urlparse import urljoin", "Shipping' When creating new instances, relationships can be assigned via navigation properties: ..", "between entities, but does not inherit from PropertyBase. 
\"\"\" def __init__(self, name, entitycls,", "not in ic: cache = {} ic[self.name] = cache else: cache = ic[self.name]", "python >>> order = Service.query(Order).first() >>> order.Shipper <Entity(Shipper:3)> >>> order.Shipper.CompanyName 'Federal Shipping' When", "if self.is_collection else 'single' try: return cache[cache_type] except KeyError: cache[cache_type] = self._get_instances_from_server(instance) return", "else: self.foreign_key = foreign_key def __repr__(self): return u'<NavigationProperty to {0}>'.format(self.entitycls) def instances_from_data(self, raw_data,", "matched_class for subclass in matched_class.__subclasses__(): if subclass.__odata_type__ == odata_type[1:]: return self._getClass_by_response_type(subclass, odata_type) return", "for subclass in matched_class.__subclasses__(): if subclass.__odata_type__ == odata_type[1:]: return self._getClass_by_response_type(subclass, odata_type) return matched_class", "self._get_parent_cache(instance) if es.instance_url is None: if self.is_collection: return cache.get('collection', []) return cache.get('single', None)", "if self.is_collection: return [self.instance_from_data(d, connection) for d in raw_data['value']] if raw_data['value'] else []", "containment if isinstance(foreign_key, PropertyBase): self.foreign_key = foreign_key.name else: self.foreign_key = foreign_key def __repr__(self):" ]
[ "Invoice @click.group('billingyard') @click.option('-s', '--sender', type=str, default='sender.json') @click.option('-t', '--template', type=str) @click.pass_context def cli(ctx, sender:", "= click.make_pass_decorator(BillingYard) @cli.command() @click.option('-r', '--receiver', type=str) @click.option('-i', '--invoice', type=str) @click.option('--vat', is_flag=True) @pass_billing_yard def", "def issue_invoice(billing_yard: BillingYard, receiver: str, invoice: str, vat: bool): if vat: billing_yard.set_vat_invoice_processor() invoice:", "'--sender', type=str, default='sender.json') @click.option('-t', '--template', type=str) @click.pass_context def cli(ctx, sender: str, template: str):", "sender: str, template: str): ctx.obj = BillingYard(sender, template) pass_billing_yard = click.make_pass_decorator(BillingYard) @cli.command() @click.option('-r',", "from .models import Invoice @click.group('billingyard') @click.option('-s', '--sender', type=str, default='sender.json') @click.option('-t', '--template', type=str) @click.pass_context", "pass_billing_yard = click.make_pass_decorator(BillingYard) @cli.command() @click.option('-r', '--receiver', type=str) @click.option('-i', '--invoice', type=str) @click.option('--vat', is_flag=True) @pass_billing_yard", "type=str) @click.option('-i', '--invoice', type=str) @click.option('--vat', is_flag=True) @pass_billing_yard def issue_invoice(billing_yard: BillingYard, receiver: str, invoice:", "'--receiver', type=str) @click.option('-i', '--invoice', type=str) @click.option('--vat', is_flag=True) @pass_billing_yard def issue_invoice(billing_yard: BillingYard, receiver: str,", "@click.option('-i', '--invoice', type=str) @click.option('--vat', is_flag=True) @pass_billing_yard def issue_invoice(billing_yard: BillingYard, receiver: str, invoice: str,", "str): ctx.obj = BillingYard(sender, template) pass_billing_yard = click.make_pass_decorator(BillingYard) @cli.command() @click.option('-r', '--receiver', type=str) 
@click.option('-i',", "str, template: str): ctx.obj = BillingYard(sender, template) pass_billing_yard = click.make_pass_decorator(BillingYard) @cli.command() @click.option('-r', '--receiver',", "click.make_pass_decorator(BillingYard) @cli.command() @click.option('-r', '--receiver', type=str) @click.option('-i', '--invoice', type=str) @click.option('--vat', is_flag=True) @pass_billing_yard def issue_invoice(billing_yard:", "template: str): ctx.obj = BillingYard(sender, template) pass_billing_yard = click.make_pass_decorator(BillingYard) @cli.command() @click.option('-r', '--receiver', type=str)", ".billingyard import BillingYard from .models import Invoice @click.group('billingyard') @click.option('-s', '--sender', type=str, default='sender.json') @click.option('-t',", "import BillingYard from .models import Invoice @click.group('billingyard') @click.option('-s', '--sender', type=str, default='sender.json') @click.option('-t', '--template',", "BillingYard(sender, template) pass_billing_yard = click.make_pass_decorator(BillingYard) @cli.command() @click.option('-r', '--receiver', type=str) @click.option('-i', '--invoice', type=str) @click.option('--vat',", "cli(ctx, sender: str, template: str): ctx.obj = BillingYard(sender, template) pass_billing_yard = click.make_pass_decorator(BillingYard) @cli.command()", "@click.option('--vat', is_flag=True) @pass_billing_yard def issue_invoice(billing_yard: BillingYard, receiver: str, invoice: str, vat: bool): if", "type=str) @click.pass_context def cli(ctx, sender: str, template: str): ctx.obj = BillingYard(sender, template) pass_billing_yard", "@click.option('-r', '--receiver', type=str) @click.option('-i', '--invoice', type=str) @click.option('--vat', is_flag=True) @pass_billing_yard def issue_invoice(billing_yard: BillingYard, receiver:", "from .billingyard import BillingYard from .models import Invoice @click.group('billingyard') @click.option('-s', '--sender', type=str, default='sender.json')", "@click.group('billingyard') 
@click.option('-s', '--sender', type=str, default='sender.json') @click.option('-t', '--template', type=str) @click.pass_context def cli(ctx, sender: str,", "str, invoice: str, vat: bool): if vat: billing_yard.set_vat_invoice_processor() invoice: Invoice = billing_yard.create_invoice(invoice, receiver)", "'--invoice', type=str) @click.option('--vat', is_flag=True) @pass_billing_yard def issue_invoice(billing_yard: BillingYard, receiver: str, invoice: str, vat:", "@click.pass_context def cli(ctx, sender: str, template: str): ctx.obj = BillingYard(sender, template) pass_billing_yard =", "invoice: str, vat: bool): if vat: billing_yard.set_vat_invoice_processor() invoice: Invoice = billing_yard.create_invoice(invoice, receiver) billing_yard.print_invoice(invoice)", "= BillingYard(sender, template) pass_billing_yard = click.make_pass_decorator(BillingYard) @cli.command() @click.option('-r', '--receiver', type=str) @click.option('-i', '--invoice', type=str)", "def cli(ctx, sender: str, template: str): ctx.obj = BillingYard(sender, template) pass_billing_yard = click.make_pass_decorator(BillingYard)", "@cli.command() @click.option('-r', '--receiver', type=str) @click.option('-i', '--invoice', type=str) @click.option('--vat', is_flag=True) @pass_billing_yard def issue_invoice(billing_yard: BillingYard,", "BillingYard, receiver: str, invoice: str, vat: bool): if vat: billing_yard.set_vat_invoice_processor() invoice: Invoice =", "@pass_billing_yard def issue_invoice(billing_yard: BillingYard, receiver: str, invoice: str, vat: bool): if vat: billing_yard.set_vat_invoice_processor()", "import Invoice @click.group('billingyard') @click.option('-s', '--sender', type=str, default='sender.json') @click.option('-t', '--template', type=str) @click.pass_context def cli(ctx,", "@click.option('-s', '--sender', type=str, default='sender.json') @click.option('-t', '--template', type=str) @click.pass_context def cli(ctx, sender: str, template:", "default='sender.json') @click.option('-t', 
'--template', type=str) @click.pass_context def cli(ctx, sender: str, template: str): ctx.obj =", "click from .billingyard import BillingYard from .models import Invoice @click.group('billingyard') @click.option('-s', '--sender', type=str,", ".models import Invoice @click.group('billingyard') @click.option('-s', '--sender', type=str, default='sender.json') @click.option('-t', '--template', type=str) @click.pass_context def", "@click.option('-t', '--template', type=str) @click.pass_context def cli(ctx, sender: str, template: str): ctx.obj = BillingYard(sender,", "template) pass_billing_yard = click.make_pass_decorator(BillingYard) @cli.command() @click.option('-r', '--receiver', type=str) @click.option('-i', '--invoice', type=str) @click.option('--vat', is_flag=True)", "is_flag=True) @pass_billing_yard def issue_invoice(billing_yard: BillingYard, receiver: str, invoice: str, vat: bool): if vat:", "receiver: str, invoice: str, vat: bool): if vat: billing_yard.set_vat_invoice_processor() invoice: Invoice = billing_yard.create_invoice(invoice,", "ctx.obj = BillingYard(sender, template) pass_billing_yard = click.make_pass_decorator(BillingYard) @cli.command() @click.option('-r', '--receiver', type=str) @click.option('-i', '--invoice',", "type=str, default='sender.json') @click.option('-t', '--template', type=str) @click.pass_context def cli(ctx, sender: str, template: str): ctx.obj", "import click from .billingyard import BillingYard from .models import Invoice @click.group('billingyard') @click.option('-s', '--sender',", "<reponame>MartinVondrak/billing-yard<gh_stars>0 import click from .billingyard import BillingYard from .models import Invoice @click.group('billingyard') @click.option('-s',", "'--template', type=str) @click.pass_context def cli(ctx, sender: str, template: str): ctx.obj = BillingYard(sender, template)", "type=str) @click.option('--vat', is_flag=True) @pass_billing_yard def issue_invoice(billing_yard: BillingYard, receiver: str, invoice: str, vat: 
bool):", "issue_invoice(billing_yard: BillingYard, receiver: str, invoice: str, vat: bool): if vat: billing_yard.set_vat_invoice_processor() invoice: Invoice", "BillingYard from .models import Invoice @click.group('billingyard') @click.option('-s', '--sender', type=str, default='sender.json') @click.option('-t', '--template', type=str)" ]
[ "pynamodb.models import Model from pynamodb.attributes import UnicodeAttribute class Blog(Model): class Meta: table_name='Blog' region", "= 1 host = \"http://dynamodb:8000\" title = UnicodeAttribute(hash_key=True) content = UnicodeAttribute(range_key=True) memo =", "read_capacity_units = 1 host = \"http://dynamodb:8000\" title = UnicodeAttribute(hash_key=True) content = UnicodeAttribute(range_key=True) memo", "Meta: table_name='Blog' region = 'us-west-1' write_capacity_units = 1 read_capacity_units = 1 host =", "Model from pynamodb.attributes import UnicodeAttribute class Blog(Model): class Meta: table_name='Blog' region = 'us-west-1'", "pynamodb.attributes import UnicodeAttribute class Blog(Model): class Meta: table_name='Blog' region = 'us-west-1' write_capacity_units =", "= 'us-west-1' write_capacity_units = 1 read_capacity_units = 1 host = \"http://dynamodb:8000\" title =", "import Model from pynamodb.attributes import UnicodeAttribute class Blog(Model): class Meta: table_name='Blog' region =", "class Blog(Model): class Meta: table_name='Blog' region = 'us-west-1' write_capacity_units = 1 read_capacity_units =", "table_name='Blog' region = 'us-west-1' write_capacity_units = 1 read_capacity_units = 1 host = \"http://dynamodb:8000\"", "write_capacity_units = 1 read_capacity_units = 1 host = \"http://dynamodb:8000\" title = UnicodeAttribute(hash_key=True) content", "region = 'us-west-1' write_capacity_units = 1 read_capacity_units = 1 host = \"http://dynamodb:8000\" title", "'us-west-1' write_capacity_units = 1 read_capacity_units = 1 host = \"http://dynamodb:8000\" title = UnicodeAttribute(hash_key=True)", "UnicodeAttribute class Blog(Model): class Meta: table_name='Blog' region = 'us-west-1' write_capacity_units = 1 read_capacity_units", "Blog(Model): class Meta: table_name='Blog' region = 'us-west-1' write_capacity_units = 1 read_capacity_units = 1", "1 host = \"http://dynamodb:8000\" title = UnicodeAttribute(hash_key=True) content = 
UnicodeAttribute(range_key=True) memo = UnicodeAttribute()", "<gh_stars>10-100 from pynamodb.models import Model from pynamodb.attributes import UnicodeAttribute class Blog(Model): class Meta:", "from pynamodb.models import Model from pynamodb.attributes import UnicodeAttribute class Blog(Model): class Meta: table_name='Blog'", "from pynamodb.attributes import UnicodeAttribute class Blog(Model): class Meta: table_name='Blog' region = 'us-west-1' write_capacity_units", "= 1 read_capacity_units = 1 host = \"http://dynamodb:8000\" title = UnicodeAttribute(hash_key=True) content =", "1 read_capacity_units = 1 host = \"http://dynamodb:8000\" title = UnicodeAttribute(hash_key=True) content = UnicodeAttribute(range_key=True)", "class Meta: table_name='Blog' region = 'us-west-1' write_capacity_units = 1 read_capacity_units = 1 host", "import UnicodeAttribute class Blog(Model): class Meta: table_name='Blog' region = 'us-west-1' write_capacity_units = 1" ]
[ "'btcusdt': Exchange.BINANCE, 'ethusdt': Exchange.BINANCE, 'bnbusdt': Exchange.BINANCE, 'adausdt': Exchange.BINANCE, 'dogeusdt': Exchange.BINANCE, 'xrpusdt': Exchange.BINANCE, 'bchusdt':", "'ltcusdt': Exchange.BINANCE, 'xlmusdt': Exchange.BINANCE, 'etcusdt': Exchange.BINANCE, 'cocosusdt': Exchange.BINANCE, 'thetausdt': Exchange.BINANCE, 'vetusdt': Exchange.BINANCE, 'eosusdt':", "for i in symbol_exchange_dict.items(): manager_engine.download_bar_data(symbol=i[0], exchange=i[1], interval=Interval.MINUTE.DAILY, start=datetime(2000, 1, 1, 0, 0, 0))", "{ \"key\": Info.key.value, \"secret\": Info.secret.value, \"session_number\": 3, \"proxy_host\": \"127.0.0.1\", \"proxy_port\": 10809, } binances_setting", "\"BINANCES\") manager_engine = ManagerEngine(main_engine=main_engine, event_engine=event_engine) time.sleep(5) for i in symbol_exchange_dict.items(): manager_engine.download_bar_data(symbol=i[0], exchange=i[1], interval=Interval.MINUTE.DAILY,", "Exchange.BINANCE, 'thetausdt': Exchange.BINANCE, 'vetusdt': Exchange.BINANCE, 'eosusdt': Exchange.BINANCE, 'maticusdt': Exchange.BINANCE, 'trxusdt': Exchange.BINANCE, 'xmrusdt': Exchange.BINANCE,", "\"BINANCE\") main_engine.connect(binances_setting, \"BINANCES\") manager_engine = ManagerEngine(main_engine=main_engine, event_engine=event_engine) time.sleep(5) for i in symbol_exchange_dict.items(): manager_engine.download_bar_data(symbol=i[0],", "\"127.0.0.1\", \"proxy_port\": 10809, } binances_setting = { \"key\": Info.key.value, \"secret\": Info.secret.value, \"会话数\": 3,", "main_engine.add_gateway(BinancesGateway) main_engine.add_app(DataManagerApp) main_engine.connect(binance_setting, \"BINANCE\") main_engine.connect(binances_setting, \"BINANCES\") manager_engine = ManagerEngine(main_engine=main_engine, event_engine=event_engine) time.sleep(5) for i", "\"key\": Info.key.value, \"secret\": Info.secret.value, \"session_number\": 3, \"proxy_host\": \"127.0.0.1\", \"proxy_port\": 10809, } binances_setting =", "\"proxy_host\": 
\"127.0.0.1\", \"proxy_port\": 10809, } binances_setting = { \"key\": Info.key.value, \"secret\": Info.secret.value, \"会话数\":", "main_engine.add_app(DataManagerApp) main_engine.connect(binance_setting, \"BINANCE\") main_engine.connect(binances_setting, \"BINANCES\") manager_engine = ManagerEngine(main_engine=main_engine, event_engine=event_engine) time.sleep(5) for i in", "\"合约模式\": \"正向\", \"代理地址\": \"127.0.0.1\", \"代理端口\": 10809, } symbol_exchange_dict = { 'btcusdt': Exchange.BINANCE, 'ethusdt':", "import datetime from backtest_entrance.setting import Info from vnpy.app.data_manager import DataManagerApp from vnpy.app.data_manager.engine import", "<gh_stars>1-10 import time from datetime import datetime from backtest_entrance.setting import Info from vnpy.app.data_manager", "'linkusdt': Exchange.BINANCE, 'ltcusdt': Exchange.BINANCE, 'xlmusdt': Exchange.BINANCE, 'etcusdt': Exchange.BINANCE, 'cocosusdt': Exchange.BINANCE, 'thetausdt': Exchange.BINANCE, 'vetusdt':", "ManagerEngine(main_engine=main_engine, event_engine=event_engine) time.sleep(5) for i in symbol_exchange_dict.items(): manager_engine.download_bar_data(symbol=i[0], exchange=i[1], interval=Interval.MINUTE.DAILY, start=datetime(2000, 1, 1,", "Exchange.BINANCE, 'linkusdt': Exchange.BINANCE, 'ltcusdt': Exchange.BINANCE, 'xlmusdt': Exchange.BINANCE, 'etcusdt': Exchange.BINANCE, 'cocosusdt': Exchange.BINANCE, 'thetausdt': Exchange.BINANCE,", "= ManagerEngine(main_engine=main_engine, event_engine=event_engine) time.sleep(5) for i in symbol_exchange_dict.items(): manager_engine.download_bar_data(symbol=i[0], exchange=i[1], interval=Interval.MINUTE.DAILY, start=datetime(2000, 1,", "BinancesGateway from vnpy.trader.constant import Exchange, Interval from vnpy.trader.engine import MainEngine binance_setting = {", "= EventEngine() main_engine = MainEngine(event_engine) main_engine.add_gateway(BinanceGateway) main_engine.add_gateway(BinancesGateway) main_engine.add_app(DataManagerApp) 
main_engine.connect(binance_setting, \"BINANCE\") main_engine.connect(binances_setting, \"BINANCES\") manager_engine", "Exchange.BINANCE, 'bchusdt': Exchange.BINANCE, 'linkusdt': Exchange.BINANCE, 'ltcusdt': Exchange.BINANCE, 'xlmusdt': Exchange.BINANCE, 'etcusdt': Exchange.BINANCE, 'cocosusdt': Exchange.BINANCE,", "'vetusdt': Exchange.BINANCE, 'eosusdt': Exchange.BINANCE, 'maticusdt': Exchange.BINANCE, 'trxusdt': Exchange.BINANCE, 'xmrusdt': Exchange.BINANCE, 'neousdt': Exchange.BINANCE, 'fttusdt':", "datetime from backtest_entrance.setting import Info from vnpy.app.data_manager import DataManagerApp from vnpy.app.data_manager.engine import ManagerEngine", "binances_setting = { \"key\": Info.key.value, \"secret\": Info.secret.value, \"会话数\": 3, \"服务器\": \"REAL\", \"合约模式\": \"正向\",", "import time from datetime import datetime from backtest_entrance.setting import Info from vnpy.app.data_manager import", "\"key\": Info.key.value, \"secret\": Info.secret.value, \"会话数\": 3, \"服务器\": \"REAL\", \"合约模式\": \"正向\", \"代理地址\": \"127.0.0.1\", \"代理端口\":", "Exchange.BINANCE, 'dogeusdt': Exchange.BINANCE, 'xrpusdt': Exchange.BINANCE, 'bchusdt': Exchange.BINANCE, 'linkusdt': Exchange.BINANCE, 'ltcusdt': Exchange.BINANCE, 'xlmusdt': Exchange.BINANCE,", "from vnpy.trader.engine import MainEngine binance_setting = { \"key\": Info.key.value, \"secret\": Info.secret.value, \"session_number\": 3,", "Exchange.BINANCE, 'xmrusdt': Exchange.BINANCE, 'neousdt': Exchange.BINANCE, 'fttusdt': Exchange.BINANCE, } event_engine = EventEngine() main_engine =", "time.sleep(5) for i in symbol_exchange_dict.items(): manager_engine.download_bar_data(symbol=i[0], exchange=i[1], interval=Interval.MINUTE.DAILY, start=datetime(2000, 1, 1, 0, 0,", "binance_setting = { \"key\": Info.key.value, \"secret\": Info.secret.value, \"session_number\": 3, \"proxy_host\": \"127.0.0.1\", \"proxy_port\": 10809,", "Info from vnpy.app.data_manager import DataManagerApp from vnpy.app.data_manager.engine import 
ManagerEngine from vnpy.event import EventEngine", "'neousdt': Exchange.BINANCE, 'fttusdt': Exchange.BINANCE, } event_engine = EventEngine() main_engine = MainEngine(event_engine) main_engine.add_gateway(BinanceGateway) main_engine.add_gateway(BinancesGateway)", "\"REAL\", \"合约模式\": \"正向\", \"代理地址\": \"127.0.0.1\", \"代理端口\": 10809, } symbol_exchange_dict = { 'btcusdt': Exchange.BINANCE,", "main_engine.add_gateway(BinanceGateway) main_engine.add_gateway(BinancesGateway) main_engine.add_app(DataManagerApp) main_engine.connect(binance_setting, \"BINANCE\") main_engine.connect(binances_setting, \"BINANCES\") manager_engine = ManagerEngine(main_engine=main_engine, event_engine=event_engine) time.sleep(5) for", "import EventEngine from vnpy.gateway.binance import BinanceGateway from vnpy.gateway.binances import BinancesGateway from vnpy.trader.constant import", "import ManagerEngine from vnpy.event import EventEngine from vnpy.gateway.binance import BinanceGateway from vnpy.gateway.binances import", "vnpy.gateway.binance import BinanceGateway from vnpy.gateway.binances import BinancesGateway from vnpy.trader.constant import Exchange, Interval from", "\"代理地址\": \"127.0.0.1\", \"代理端口\": 10809, } symbol_exchange_dict = { 'btcusdt': Exchange.BINANCE, 'ethusdt': Exchange.BINANCE, 'bnbusdt':", "MainEngine(event_engine) main_engine.add_gateway(BinanceGateway) main_engine.add_gateway(BinancesGateway) main_engine.add_app(DataManagerApp) main_engine.connect(binance_setting, \"BINANCE\") main_engine.connect(binances_setting, \"BINANCES\") manager_engine = ManagerEngine(main_engine=main_engine, event_engine=event_engine) time.sleep(5)", "import DataManagerApp from vnpy.app.data_manager.engine import ManagerEngine from vnpy.event import EventEngine from vnpy.gateway.binance import", "vnpy.trader.constant import Exchange, Interval from vnpy.trader.engine import MainEngine binance_setting = { \"key\": Info.key.value,", "3, \"proxy_host\": \"127.0.0.1\", \"proxy_port\": 10809, } 
binances_setting = { \"key\": Info.key.value, \"secret\": Info.secret.value,", "'xmrusdt': Exchange.BINANCE, 'neousdt': Exchange.BINANCE, 'fttusdt': Exchange.BINANCE, } event_engine = EventEngine() main_engine = MainEngine(event_engine)", "main_engine.connect(binance_setting, \"BINANCE\") main_engine.connect(binances_setting, \"BINANCES\") manager_engine = ManagerEngine(main_engine=main_engine, event_engine=event_engine) time.sleep(5) for i in symbol_exchange_dict.items():", "Exchange.BINANCE, 'bnbusdt': Exchange.BINANCE, 'adausdt': Exchange.BINANCE, 'dogeusdt': Exchange.BINANCE, 'xrpusdt': Exchange.BINANCE, 'bchusdt': Exchange.BINANCE, 'linkusdt': Exchange.BINANCE,", "Info.secret.value, \"session_number\": 3, \"proxy_host\": \"127.0.0.1\", \"proxy_port\": 10809, } binances_setting = { \"key\": Info.key.value,", "i in symbol_exchange_dict.items(): manager_engine.download_bar_data(symbol=i[0], exchange=i[1], interval=Interval.MINUTE.DAILY, start=datetime(2000, 1, 1, 0, 0, 0)) print(f'数据下载完成:{i[0]}')", "MainEngine binance_setting = { \"key\": Info.key.value, \"secret\": Info.secret.value, \"session_number\": 3, \"proxy_host\": \"127.0.0.1\", \"proxy_port\":", "Exchange.BINANCE, 'cocosusdt': Exchange.BINANCE, 'thetausdt': Exchange.BINANCE, 'vetusdt': Exchange.BINANCE, 'eosusdt': Exchange.BINANCE, 'maticusdt': Exchange.BINANCE, 'trxusdt': Exchange.BINANCE,", "Exchange.BINANCE, 'maticusdt': Exchange.BINANCE, 'trxusdt': Exchange.BINANCE, 'xmrusdt': Exchange.BINANCE, 'neousdt': Exchange.BINANCE, 'fttusdt': Exchange.BINANCE, } event_engine", "Exchange.BINANCE, 'neousdt': Exchange.BINANCE, 'fttusdt': Exchange.BINANCE, } event_engine = EventEngine() main_engine = MainEngine(event_engine) main_engine.add_gateway(BinanceGateway)", "\"session_number\": 3, \"proxy_host\": \"127.0.0.1\", \"proxy_port\": 10809, } binances_setting = { \"key\": Info.key.value, \"secret\":", "time from datetime import datetime from backtest_entrance.setting import Info from vnpy.app.data_manager 
import DataManagerApp", "= { 'btcusdt': Exchange.BINANCE, 'ethusdt': Exchange.BINANCE, 'bnbusdt': Exchange.BINANCE, 'adausdt': Exchange.BINANCE, 'dogeusdt': Exchange.BINANCE, 'xrpusdt':", "datetime import datetime from backtest_entrance.setting import Info from vnpy.app.data_manager import DataManagerApp from vnpy.app.data_manager.engine", "Exchange, Interval from vnpy.trader.engine import MainEngine binance_setting = { \"key\": Info.key.value, \"secret\": Info.secret.value,", "Interval from vnpy.trader.engine import MainEngine binance_setting = { \"key\": Info.key.value, \"secret\": Info.secret.value, \"session_number\":", "Exchange.BINANCE, 'vetusdt': Exchange.BINANCE, 'eosusdt': Exchange.BINANCE, 'maticusdt': Exchange.BINANCE, 'trxusdt': Exchange.BINANCE, 'xmrusdt': Exchange.BINANCE, 'neousdt': Exchange.BINANCE,", "} event_engine = EventEngine() main_engine = MainEngine(event_engine) main_engine.add_gateway(BinanceGateway) main_engine.add_gateway(BinancesGateway) main_engine.add_app(DataManagerApp) main_engine.connect(binance_setting, \"BINANCE\") main_engine.connect(binances_setting,", "= { \"key\": Info.key.value, \"secret\": Info.secret.value, \"会话数\": 3, \"服务器\": \"REAL\", \"合约模式\": \"正向\", \"代理地址\":", "from vnpy.gateway.binances import BinancesGateway from vnpy.trader.constant import Exchange, Interval from vnpy.trader.engine import MainEngine", "event_engine = EventEngine() main_engine = MainEngine(event_engine) main_engine.add_gateway(BinanceGateway) main_engine.add_gateway(BinancesGateway) main_engine.add_app(DataManagerApp) main_engine.connect(binance_setting, \"BINANCE\") main_engine.connect(binances_setting, \"BINANCES\")", "from vnpy.app.data_manager import DataManagerApp from vnpy.app.data_manager.engine import ManagerEngine from vnpy.event import EventEngine from", "import MainEngine binance_setting = { \"key\": Info.key.value, \"secret\": Info.secret.value, \"session_number\": 3, \"proxy_host\": \"127.0.0.1\",", "vnpy.trader.engine import 
MainEngine binance_setting = { \"key\": Info.key.value, \"secret\": Info.secret.value, \"session_number\": 3, \"proxy_host\":", "\"服务器\": \"REAL\", \"合约模式\": \"正向\", \"代理地址\": \"127.0.0.1\", \"代理端口\": 10809, } symbol_exchange_dict = { 'btcusdt':", "vnpy.gateway.binances import BinancesGateway from vnpy.trader.constant import Exchange, Interval from vnpy.trader.engine import MainEngine binance_setting", "'ethusdt': Exchange.BINANCE, 'bnbusdt': Exchange.BINANCE, 'adausdt': Exchange.BINANCE, 'dogeusdt': Exchange.BINANCE, 'xrpusdt': Exchange.BINANCE, 'bchusdt': Exchange.BINANCE, 'linkusdt':", "import BinancesGateway from vnpy.trader.constant import Exchange, Interval from vnpy.trader.engine import MainEngine binance_setting =", "from vnpy.gateway.binance import BinanceGateway from vnpy.gateway.binances import BinancesGateway from vnpy.trader.constant import Exchange, Interval", "'eosusdt': Exchange.BINANCE, 'maticusdt': Exchange.BINANCE, 'trxusdt': Exchange.BINANCE, 'xmrusdt': Exchange.BINANCE, 'neousdt': Exchange.BINANCE, 'fttusdt': Exchange.BINANCE, }", "'adausdt': Exchange.BINANCE, 'dogeusdt': Exchange.BINANCE, 'xrpusdt': Exchange.BINANCE, 'bchusdt': Exchange.BINANCE, 'linkusdt': Exchange.BINANCE, 'ltcusdt': Exchange.BINANCE, 'xlmusdt':", "\"代理端口\": 10809, } symbol_exchange_dict = { 'btcusdt': Exchange.BINANCE, 'ethusdt': Exchange.BINANCE, 'bnbusdt': Exchange.BINANCE, 'adausdt':", "} binances_setting = { \"key\": Info.key.value, \"secret\": Info.secret.value, \"会话数\": 3, \"服务器\": \"REAL\", \"合约模式\":", "Exchange.BINANCE, 'xlmusdt': Exchange.BINANCE, 'etcusdt': Exchange.BINANCE, 'cocosusdt': Exchange.BINANCE, 'thetausdt': Exchange.BINANCE, 'vetusdt': Exchange.BINANCE, 'eosusdt': Exchange.BINANCE,", "'thetausdt': Exchange.BINANCE, 'vetusdt': Exchange.BINANCE, 'eosusdt': Exchange.BINANCE, 'maticusdt': Exchange.BINANCE, 'trxusdt': Exchange.BINANCE, 'xmrusdt': Exchange.BINANCE, 'neousdt':", "EventEngine from vnpy.gateway.binance import BinanceGateway from 
vnpy.gateway.binances import BinancesGateway from vnpy.trader.constant import Exchange,", "'bchusdt': Exchange.BINANCE, 'linkusdt': Exchange.BINANCE, 'ltcusdt': Exchange.BINANCE, 'xlmusdt': Exchange.BINANCE, 'etcusdt': Exchange.BINANCE, 'cocosusdt': Exchange.BINANCE, 'thetausdt':", "Exchange.BINANCE, 'fttusdt': Exchange.BINANCE, } event_engine = EventEngine() main_engine = MainEngine(event_engine) main_engine.add_gateway(BinanceGateway) main_engine.add_gateway(BinancesGateway) main_engine.add_app(DataManagerApp)", "main_engine.connect(binances_setting, \"BINANCES\") manager_engine = ManagerEngine(main_engine=main_engine, event_engine=event_engine) time.sleep(5) for i in symbol_exchange_dict.items(): manager_engine.download_bar_data(symbol=i[0], exchange=i[1],", "'etcusdt': Exchange.BINANCE, 'cocosusdt': Exchange.BINANCE, 'thetausdt': Exchange.BINANCE, 'vetusdt': Exchange.BINANCE, 'eosusdt': Exchange.BINANCE, 'maticusdt': Exchange.BINANCE, 'trxusdt':", "\"proxy_port\": 10809, } binances_setting = { \"key\": Info.key.value, \"secret\": Info.secret.value, \"会话数\": 3, \"服务器\":", "Exchange.BINANCE, 'xrpusdt': Exchange.BINANCE, 'bchusdt': Exchange.BINANCE, 'linkusdt': Exchange.BINANCE, 'ltcusdt': Exchange.BINANCE, 'xlmusdt': Exchange.BINANCE, 'etcusdt': Exchange.BINANCE,", "from vnpy.trader.constant import Exchange, Interval from vnpy.trader.engine import MainEngine binance_setting = { \"key\":", "ManagerEngine from vnpy.event import EventEngine from vnpy.gateway.binance import BinanceGateway from vnpy.gateway.binances import BinancesGateway", "'xlmusdt': Exchange.BINANCE, 'etcusdt': Exchange.BINANCE, 'cocosusdt': Exchange.BINANCE, 'thetausdt': Exchange.BINANCE, 'vetusdt': Exchange.BINANCE, 'eosusdt': Exchange.BINANCE, 'maticusdt':", "'fttusdt': Exchange.BINANCE, } event_engine = EventEngine() main_engine = MainEngine(event_engine) main_engine.add_gateway(BinanceGateway) main_engine.add_gateway(BinancesGateway) main_engine.add_app(DataManagerApp) 
main_engine.connect(binance_setting,", "10809, } symbol_exchange_dict = { 'btcusdt': Exchange.BINANCE, 'ethusdt': Exchange.BINANCE, 'bnbusdt': Exchange.BINANCE, 'adausdt': Exchange.BINANCE,", "DataManagerApp from vnpy.app.data_manager.engine import ManagerEngine from vnpy.event import EventEngine from vnpy.gateway.binance import BinanceGateway", "EventEngine() main_engine = MainEngine(event_engine) main_engine.add_gateway(BinanceGateway) main_engine.add_gateway(BinancesGateway) main_engine.add_app(DataManagerApp) main_engine.connect(binance_setting, \"BINANCE\") main_engine.connect(binances_setting, \"BINANCES\") manager_engine =", "Exchange.BINANCE, 'ethusdt': Exchange.BINANCE, 'bnbusdt': Exchange.BINANCE, 'adausdt': Exchange.BINANCE, 'dogeusdt': Exchange.BINANCE, 'xrpusdt': Exchange.BINANCE, 'bchusdt': Exchange.BINANCE,", "'xrpusdt': Exchange.BINANCE, 'bchusdt': Exchange.BINANCE, 'linkusdt': Exchange.BINANCE, 'ltcusdt': Exchange.BINANCE, 'xlmusdt': Exchange.BINANCE, 'etcusdt': Exchange.BINANCE, 'cocosusdt':", "{ \"key\": Info.key.value, \"secret\": Info.secret.value, \"会话数\": 3, \"服务器\": \"REAL\", \"合约模式\": \"正向\", \"代理地址\": \"127.0.0.1\",", "import Exchange, Interval from vnpy.trader.engine import MainEngine binance_setting = { \"key\": Info.key.value, \"secret\":", "= { \"key\": Info.key.value, \"secret\": Info.secret.value, \"session_number\": 3, \"proxy_host\": \"127.0.0.1\", \"proxy_port\": 10809, }", "import BinanceGateway from vnpy.gateway.binances import BinancesGateway from vnpy.trader.constant import Exchange, Interval from vnpy.trader.engine", "= MainEngine(event_engine) main_engine.add_gateway(BinanceGateway) main_engine.add_gateway(BinancesGateway) main_engine.add_app(DataManagerApp) main_engine.connect(binance_setting, \"BINANCE\") main_engine.connect(binances_setting, \"BINANCES\") manager_engine = ManagerEngine(main_engine=main_engine, event_engine=event_engine)", "manager_engine = ManagerEngine(main_engine=main_engine, event_engine=event_engine) 
time.sleep(5) for i in symbol_exchange_dict.items(): manager_engine.download_bar_data(symbol=i[0], exchange=i[1], interval=Interval.MINUTE.DAILY, start=datetime(2000,", "event_engine=event_engine) time.sleep(5) for i in symbol_exchange_dict.items(): manager_engine.download_bar_data(symbol=i[0], exchange=i[1], interval=Interval.MINUTE.DAILY, start=datetime(2000, 1, 1, 0,", "main_engine = MainEngine(event_engine) main_engine.add_gateway(BinanceGateway) main_engine.add_gateway(BinancesGateway) main_engine.add_app(DataManagerApp) main_engine.connect(binance_setting, \"BINANCE\") main_engine.connect(binances_setting, \"BINANCES\") manager_engine = ManagerEngine(main_engine=main_engine,", "{ 'btcusdt': Exchange.BINANCE, 'ethusdt': Exchange.BINANCE, 'bnbusdt': Exchange.BINANCE, 'adausdt': Exchange.BINANCE, 'dogeusdt': Exchange.BINANCE, 'xrpusdt': Exchange.BINANCE,", "from vnpy.app.data_manager.engine import ManagerEngine from vnpy.event import EventEngine from vnpy.gateway.binance import BinanceGateway from", "from vnpy.event import EventEngine from vnpy.gateway.binance import BinanceGateway from vnpy.gateway.binances import BinancesGateway from", "vnpy.event import EventEngine from vnpy.gateway.binance import BinanceGateway from vnpy.gateway.binances import BinancesGateway from vnpy.trader.constant", "'maticusdt': Exchange.BINANCE, 'trxusdt': Exchange.BINANCE, 'xmrusdt': Exchange.BINANCE, 'neousdt': Exchange.BINANCE, 'fttusdt': Exchange.BINANCE, } event_engine =", "} symbol_exchange_dict = { 'btcusdt': Exchange.BINANCE, 'ethusdt': Exchange.BINANCE, 'bnbusdt': Exchange.BINANCE, 'adausdt': Exchange.BINANCE, 'dogeusdt':", "\"127.0.0.1\", \"代理端口\": 10809, } symbol_exchange_dict = { 'btcusdt': Exchange.BINANCE, 'ethusdt': Exchange.BINANCE, 'bnbusdt': Exchange.BINANCE,", "Info.key.value, \"secret\": Info.secret.value, \"会话数\": 3, \"服务器\": \"REAL\", \"合约模式\": \"正向\", \"代理地址\": \"127.0.0.1\", \"代理端口\": 10809,", "'cocosusdt': Exchange.BINANCE, 'thetausdt': Exchange.BINANCE, 
'vetusdt': Exchange.BINANCE, 'eosusdt': Exchange.BINANCE, 'maticusdt': Exchange.BINANCE, 'trxusdt': Exchange.BINANCE, 'xmrusdt':", "from datetime import datetime from backtest_entrance.setting import Info from vnpy.app.data_manager import DataManagerApp from", "BinanceGateway from vnpy.gateway.binances import BinancesGateway from vnpy.trader.constant import Exchange, Interval from vnpy.trader.engine import", "'dogeusdt': Exchange.BINANCE, 'xrpusdt': Exchange.BINANCE, 'bchusdt': Exchange.BINANCE, 'linkusdt': Exchange.BINANCE, 'ltcusdt': Exchange.BINANCE, 'xlmusdt': Exchange.BINANCE, 'etcusdt':", "Exchange.BINANCE, } event_engine = EventEngine() main_engine = MainEngine(event_engine) main_engine.add_gateway(BinanceGateway) main_engine.add_gateway(BinancesGateway) main_engine.add_app(DataManagerApp) main_engine.connect(binance_setting, \"BINANCE\")", "'trxusdt': Exchange.BINANCE, 'xmrusdt': Exchange.BINANCE, 'neousdt': Exchange.BINANCE, 'fttusdt': Exchange.BINANCE, } event_engine = EventEngine() main_engine", "3, \"服务器\": \"REAL\", \"合约模式\": \"正向\", \"代理地址\": \"127.0.0.1\", \"代理端口\": 10809, } symbol_exchange_dict = {", "\"会话数\": 3, \"服务器\": \"REAL\", \"合约模式\": \"正向\", \"代理地址\": \"127.0.0.1\", \"代理端口\": 10809, } symbol_exchange_dict =", "\"secret\": Info.secret.value, \"会话数\": 3, \"服务器\": \"REAL\", \"合约模式\": \"正向\", \"代理地址\": \"127.0.0.1\", \"代理端口\": 10809, }", "Exchange.BINANCE, 'ltcusdt': Exchange.BINANCE, 'xlmusdt': Exchange.BINANCE, 'etcusdt': Exchange.BINANCE, 'cocosusdt': Exchange.BINANCE, 'thetausdt': Exchange.BINANCE, 'vetusdt': Exchange.BINANCE,", "vnpy.app.data_manager.engine import ManagerEngine from vnpy.event import EventEngine from vnpy.gateway.binance import BinanceGateway from vnpy.gateway.binances", "import Info from vnpy.app.data_manager import DataManagerApp from vnpy.app.data_manager.engine import ManagerEngine from vnpy.event import", "from backtest_entrance.setting import Info from vnpy.app.data_manager import DataManagerApp from 
vnpy.app.data_manager.engine import ManagerEngine from", "Exchange.BINANCE, 'adausdt': Exchange.BINANCE, 'dogeusdt': Exchange.BINANCE, 'xrpusdt': Exchange.BINANCE, 'bchusdt': Exchange.BINANCE, 'linkusdt': Exchange.BINANCE, 'ltcusdt': Exchange.BINANCE,", "Exchange.BINANCE, 'eosusdt': Exchange.BINANCE, 'maticusdt': Exchange.BINANCE, 'trxusdt': Exchange.BINANCE, 'xmrusdt': Exchange.BINANCE, 'neousdt': Exchange.BINANCE, 'fttusdt': Exchange.BINANCE,", "\"正向\", \"代理地址\": \"127.0.0.1\", \"代理端口\": 10809, } symbol_exchange_dict = { 'btcusdt': Exchange.BINANCE, 'ethusdt': Exchange.BINANCE,", "10809, } binances_setting = { \"key\": Info.key.value, \"secret\": Info.secret.value, \"会话数\": 3, \"服务器\": \"REAL\",", "symbol_exchange_dict = { 'btcusdt': Exchange.BINANCE, 'ethusdt': Exchange.BINANCE, 'bnbusdt': Exchange.BINANCE, 'adausdt': Exchange.BINANCE, 'dogeusdt': Exchange.BINANCE,", "Exchange.BINANCE, 'etcusdt': Exchange.BINANCE, 'cocosusdt': Exchange.BINANCE, 'thetausdt': Exchange.BINANCE, 'vetusdt': Exchange.BINANCE, 'eosusdt': Exchange.BINANCE, 'maticusdt': Exchange.BINANCE,", "Exchange.BINANCE, 'trxusdt': Exchange.BINANCE, 'xmrusdt': Exchange.BINANCE, 'neousdt': Exchange.BINANCE, 'fttusdt': Exchange.BINANCE, } event_engine = EventEngine()", "backtest_entrance.setting import Info from vnpy.app.data_manager import DataManagerApp from vnpy.app.data_manager.engine import ManagerEngine from vnpy.event", "Info.secret.value, \"会话数\": 3, \"服务器\": \"REAL\", \"合约模式\": \"正向\", \"代理地址\": \"127.0.0.1\", \"代理端口\": 10809, } symbol_exchange_dict", "vnpy.app.data_manager import DataManagerApp from vnpy.app.data_manager.engine import ManagerEngine from vnpy.event import EventEngine from vnpy.gateway.binance", "Info.key.value, \"secret\": Info.secret.value, \"session_number\": 3, \"proxy_host\": \"127.0.0.1\", \"proxy_port\": 10809, } binances_setting = {", "\"secret\": Info.secret.value, \"session_number\": 3, \"proxy_host\": \"127.0.0.1\", \"proxy_port\": 10809, } binances_setting = { 
\"key\":", "'bnbusdt': Exchange.BINANCE, 'adausdt': Exchange.BINANCE, 'dogeusdt': Exchange.BINANCE, 'xrpusdt': Exchange.BINANCE, 'bchusdt': Exchange.BINANCE, 'linkusdt': Exchange.BINANCE, 'ltcusdt':" ]
[ "\"oval\": self.oval() elif self.__figureType == \"arc\": self.arc() def line(self): width = int(self[\"width\"]) height", "width = width, height = height) self.__figureType = figureType self.__filled = filled self.drawFigure()", "10, start = 0, extent = 145, fill = \"red\") else: self.create_arc(10, 10,", "width - 10, height - 10, start = 0, extent = 145, fill", "start = 0, extent = 145, fill = \"red\") else: self.create_arc(10, 10, width", "getFigureType(self): return self.__figureType def getFilled(self): return self.__filled def setFigureType(self, figureType): self.__figureType = figureType", "if self.__filled: self.create_arc(10, 10, width - 10, height - 10, start = 0,", "10, width - 10, height - 10, start = 0, extent = 145)", "10, width - 10, height - 10, fill = \"red\") else: self.create_rectangle(10, 10,", "def getFilled(self): return self.__filled def setFigureType(self, figureType): self.__figureType = figureType self.drawFigure() def setFilled(self,", "def oval(self): width = int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled: self.create_oval(10, 10, width", "= int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled: self.create_rectangle(10, 10, width - 10, height", "fill = \"red\") else: self.create_arc(10, 10, width - 10, height - 10, start", "10) def oval(self): width = int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled: self.create_oval(10, 10,", "- 10, height - 10, start = 0, extent = 145, fill =", "figureType): self.__figureType = figureType self.drawFigure() def setFilled(self, filled): self.__filled = filled self.drawFigure() def", "= int(self[\"height\"]) if self.__filled: self.create_oval(10, 10, width - 10, height - 10, fill", "self.create_oval(10, 10, width - 10, height - 10, fill = \"red\") else: self.create_oval(10,", "self.__figureType = figureType self.__filled = filled self.drawFigure() def getFigureType(self): return self.__figureType def getFilled(self):", "10, height - 10, start = 
0, extent = 145, fill = \"red\")", "__init__(self, container, figureType, filled = False, width = 100, height = 100): super().__init__(container,", "width = int(self[\"width\"]) height = int(self[\"height\"]) self.create_line(10, 10, width - 10, height -", "# Import tkinter class FigureCanvas(Canvas): def __init__(self, container, figureType, filled = False, width", "10, width - 10, height - 10) self.create_line(width - 10, 10, 10, height", "filled self.drawFigure() def getFigureType(self): return self.__figureType def getFilled(self): return self.__filled def setFigureType(self, figureType):", "= figureType self.drawFigure() def setFilled(self, filled): self.__filled = filled self.drawFigure() def drawFigure(self): if", "width, height = height) self.__figureType = figureType self.__filled = filled self.drawFigure() def getFigureType(self):", "self.oval() elif self.__figureType == \"arc\": self.arc() def line(self): width = int(self[\"width\"]) height =", "self.create_rectangle(10, 10, width - 10, height - 10) def oval(self): width = int(self[\"width\"])", "height - 10) def arc(self): width = int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled:", "filled self.drawFigure() def drawFigure(self): if self.__figureType == \"line\": self.line() elif self.__figureType == \"rectangle\":", "- 10, height - 10, fill = \"red\") else: self.create_rectangle(10, 10, width -", "10, height - 10) def rectangle(self): width = int(self[\"width\"]) height = int(self[\"height\"]) if", "= False, width = 100, height = 100): super().__init__(container, width = width, height", "if self.__filled: self.create_rectangle(10, 10, width - 10, height - 10, fill = \"red\")", "fill = \"red\") else: self.create_rectangle(10, 10, width - 10, height - 10) def", "= 145, fill = \"red\") else: self.create_arc(10, 10, width - 10, height -", "= int(self[\"height\"]) if self.__filled: self.create_arc(10, 10, width - 10, height - 10, start", "width = int(self[\"width\"]) height = 
int(self[\"height\"]) if self.__filled: self.create_rectangle(10, 10, width - 10,", "\"arc\": self.arc() def line(self): width = int(self[\"width\"]) height = int(self[\"height\"]) self.create_line(10, 10, width", "def setFilled(self, filled): self.__filled = filled self.drawFigure() def drawFigure(self): if self.__figureType == \"line\":", "FigureCanvas(Canvas): def __init__(self, container, figureType, filled = False, width = 100, height =", "import * # Import tkinter class FigureCanvas(Canvas): def __init__(self, container, figureType, filled =", "= filled self.drawFigure() def drawFigure(self): if self.__figureType == \"line\": self.line() elif self.__figureType ==", "fill = \"red\") else: self.create_oval(10, 10, width - 10, height - 10) def", "- 10, height - 10) def oval(self): width = int(self[\"width\"]) height = int(self[\"height\"])", "figureType self.__filled = filled self.drawFigure() def getFigureType(self): return self.__figureType def getFilled(self): return self.__filled", "width - 10, height - 10, fill = \"red\") else: self.create_oval(10, 10, width", "\"red\") else: self.create_oval(10, 10, width - 10, height - 10) def arc(self): width", "= height) self.__figureType = figureType self.__filled = filled self.drawFigure() def getFigureType(self): return self.__figureType", "filled): self.__filled = filled self.drawFigure() def drawFigure(self): if self.__figureType == \"line\": self.line() elif", "== \"rectangle\": self.rectangle() elif self.__figureType == \"oval\": self.oval() elif self.__figureType == \"arc\": self.arc()", "Import tkinter class FigureCanvas(Canvas): def __init__(self, container, figureType, filled = False, width =", "container, figureType, filled = False, width = 100, height = 100): super().__init__(container, width", "figureType, filled = False, width = 100, height = 100): super().__init__(container, width =", "height = height) self.__figureType = figureType self.__filled = filled self.drawFigure() def getFigureType(self): 
return", "width = int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled: self.create_arc(10, 10, width - 10,", "= 0, extent = 145, fill = \"red\") else: self.create_arc(10, 10, width -", "= \"red\") else: self.create_arc(10, 10, width - 10, height - 10, start =", "False, width = 100, height = 100): super().__init__(container, width = width, height =", "self.__filled: self.create_rectangle(10, 10, width - 10, height - 10, fill = \"red\") else:", "return self.__filled def setFigureType(self, figureType): self.__figureType = figureType self.drawFigure() def setFilled(self, filled): self.__filled", "line(self): width = int(self[\"width\"]) height = int(self[\"height\"]) self.create_line(10, 10, width - 10, height", "def line(self): width = int(self[\"width\"]) height = int(self[\"height\"]) self.create_line(10, 10, width - 10,", "100, height = 100): super().__init__(container, width = width, height = height) self.__figureType =", "width - 10, height - 10) def arc(self): width = int(self[\"width\"]) height =", "= 100): super().__init__(container, width = width, height = height) self.__figureType = figureType self.__filled", "def rectangle(self): width = int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled: self.create_rectangle(10, 10, width", "= 100, height = 100): super().__init__(container, width = width, height = height) self.__figureType", "int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled: self.create_rectangle(10, 10, width - 10, height -", "0, extent = 145, fill = \"red\") else: self.create_arc(10, 10, width - 10,", "height = int(self[\"height\"]) self.create_line(10, 10, width - 10, height - 10) self.create_line(width -", "\"red\") else: self.create_arc(10, 10, width - 10, height - 10, start = 0,", "height - 10, fill = \"red\") else: self.create_rectangle(10, 10, width - 10, height", "width - 10, height - 10) self.create_line(width - 10, 10, 10, height -", "- 10, height - 10, fill = \"red\") else: 
self.create_oval(10, 10, width -", "self.create_arc(10, 10, width - 10, height - 10, start = 0, extent =", "if self.__figureType == \"line\": self.line() elif self.__figureType == \"rectangle\": self.rectangle() elif self.__figureType ==", "10, width - 10, height - 10) def oval(self): width = int(self[\"width\"]) height", "else: self.create_rectangle(10, 10, width - 10, height - 10) def oval(self): width =", "10, height - 10) def oval(self): width = int(self[\"width\"]) height = int(self[\"height\"]) if", "* # Import tkinter class FigureCanvas(Canvas): def __init__(self, container, figureType, filled = False,", "- 10, fill = \"red\") else: self.create_oval(10, 10, width - 10, height -", "height - 10, fill = \"red\") else: self.create_oval(10, 10, width - 10, height", "setFigureType(self, figureType): self.__figureType = figureType self.drawFigure() def setFilled(self, filled): self.__filled = filled self.drawFigure()", "elif self.__figureType == \"oval\": self.oval() elif self.__figureType == \"arc\": self.arc() def line(self): width", "self.create_line(10, 10, width - 10, height - 10) self.create_line(width - 10, 10, 10,", "height - 10) def oval(self): width = int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled:", "def getFigureType(self): return self.__figureType def getFilled(self): return self.__filled def setFigureType(self, figureType): self.__figureType =", "width - 10, height - 10, fill = \"red\") else: self.create_rectangle(10, 10, width", "int(self[\"height\"]) self.create_line(10, 10, width - 10, height - 10) self.create_line(width - 10, 10,", "\"red\") else: self.create_rectangle(10, 10, width - 10, height - 10) def oval(self): width", "rectangle(self): width = int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled: self.create_rectangle(10, 10, width -", "int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled: self.create_arc(10, 10, width - 10, height -", "elif self.__figureType == \"rectangle\": 
self.rectangle() elif self.__figureType == \"oval\": self.oval() elif self.__figureType ==", "= \"red\") else: self.create_oval(10, 10, width - 10, height - 10) def arc(self):", "\"line\": self.line() elif self.__figureType == \"rectangle\": self.rectangle() elif self.__figureType == \"oval\": self.oval() elif", "self.__figureType == \"line\": self.line() elif self.__figureType == \"rectangle\": self.rectangle() elif self.__figureType == \"oval\":", "== \"oval\": self.oval() elif self.__figureType == \"arc\": self.arc() def line(self): width = int(self[\"width\"])", "def setFigureType(self, figureType): self.__figureType = figureType self.drawFigure() def setFilled(self, filled): self.__filled = filled", "self.__filled: self.create_oval(10, 10, width - 10, height - 10, fill = \"red\") else:", "10, height - 10, fill = \"red\") else: self.create_oval(10, 10, width - 10,", "height = int(self[\"height\"]) if self.__filled: self.create_rectangle(10, 10, width - 10, height - 10,", "def arc(self): width = int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled: self.create_arc(10, 10, width", "height = int(self[\"height\"]) if self.__filled: self.create_arc(10, 10, width - 10, height - 10,", "self.__filled: self.create_arc(10, 10, width - 10, height - 10, start = 0, extent", "= int(self[\"height\"]) self.create_line(10, 10, width - 10, height - 10) self.create_line(width - 10,", "= int(self[\"height\"]) if self.__filled: self.create_rectangle(10, 10, width - 10, height - 10, fill", "= int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled: self.create_arc(10, 10, width - 10, height", "10, width - 10, height - 10) def arc(self): width = int(self[\"width\"]) height", "- 10) self.create_line(width - 10, 10, 10, height - 10) def rectangle(self): width", "from tkinter import * # Import tkinter class FigureCanvas(Canvas): def __init__(self, container, figureType,", "self.__filled def setFigureType(self, figureType): self.__figureType = figureType 
self.drawFigure() def setFilled(self, filled): self.__filled =", "\"rectangle\": self.rectangle() elif self.__figureType == \"oval\": self.oval() elif self.__figureType == \"arc\": self.arc() def", "else: self.create_oval(10, 10, width - 10, height - 10) def arc(self): width =", "self.__figureType == \"arc\": self.arc() def line(self): width = int(self[\"width\"]) height = int(self[\"height\"]) self.create_line(10,", "else: self.create_arc(10, 10, width - 10, height - 10, start = 0, extent", "self.create_line(width - 10, 10, 10, height - 10) def rectangle(self): width = int(self[\"width\"])", "- 10, 10, 10, height - 10) def rectangle(self): width = int(self[\"width\"]) height", "10, height - 10) self.create_line(width - 10, 10, 10, height - 10) def", "arc(self): width = int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled: self.create_arc(10, 10, width -", "figureType self.drawFigure() def setFilled(self, filled): self.__filled = filled self.drawFigure() def drawFigure(self): if self.__figureType", "= figureType self.__filled = filled self.drawFigure() def getFigureType(self): return self.__figureType def getFilled(self): return", "height = 100): super().__init__(container, width = width, height = height) self.__figureType = figureType", "self.arc() def line(self): width = int(self[\"width\"]) height = int(self[\"height\"]) self.create_line(10, 10, width -", "10, 10, height - 10) def rectangle(self): width = int(self[\"width\"]) height = int(self[\"height\"])", "= int(self[\"width\"]) height = int(self[\"height\"]) self.create_line(10, 10, width - 10, height - 10)", "height = int(self[\"height\"]) if self.__filled: self.create_oval(10, 10, width - 10, height - 10,", "height) self.__figureType = figureType self.__filled = filled self.drawFigure() def getFigureType(self): return self.__figureType def", "self.__figureType = figureType self.drawFigure() def setFilled(self, filled): self.__filled = filled self.drawFigure() def drawFigure(self):", "== 
\"line\": self.line() elif self.__figureType == \"rectangle\": self.rectangle() elif self.__figureType == \"oval\": self.oval()", "tkinter import * # Import tkinter class FigureCanvas(Canvas): def __init__(self, container, figureType, filled", "height - 10) self.create_line(width - 10, 10, 10, height - 10) def rectangle(self):", "extent = 145, fill = \"red\") else: self.create_arc(10, 10, width - 10, height", "self.__figureType == \"oval\": self.oval() elif self.__figureType == \"arc\": self.arc() def line(self): width =", "self.__figureType def getFilled(self): return self.__filled def setFigureType(self, figureType): self.__figureType = figureType self.drawFigure() def", "def __init__(self, container, figureType, filled = False, width = 100, height = 100):", "int(self[\"height\"]) if self.__filled: self.create_rectangle(10, 10, width - 10, height - 10, fill =", "10, height - 10, fill = \"red\") else: self.create_rectangle(10, 10, width - 10,", "width = 100, height = 100): super().__init__(container, width = width, height = height)", "= filled self.drawFigure() def getFigureType(self): return self.__figureType def getFilled(self): return self.__filled def setFigureType(self,", "self.drawFigure() def getFigureType(self): return self.__figureType def getFilled(self): return self.__filled def setFigureType(self, figureType): self.__figureType", "10, 10, 10, height - 10) def rectangle(self): width = int(self[\"width\"]) height =", "- 10) def arc(self): width = int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled: self.create_arc(10,", "width = int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled: self.create_oval(10, 10, width - 10,", "= int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled: self.create_oval(10, 10, width - 10, height", "getFilled(self): return self.__filled def setFigureType(self, figureType): self.__figureType = figureType self.drawFigure() def setFilled(self, filled):", "100): 
super().__init__(container, width = width, height = height) self.__figureType = figureType self.__filled =", "int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled: self.create_oval(10, 10, width - 10, height -", "10, width - 10, height - 10, start = 0, extent = 145,", "self.drawFigure() def setFilled(self, filled): self.__filled = filled self.drawFigure() def drawFigure(self): if self.__figureType ==", "self.__figureType == \"rectangle\": self.rectangle() elif self.__figureType == \"oval\": self.oval() elif self.__figureType == \"arc\":", "= \"red\") else: self.create_rectangle(10, 10, width - 10, height - 10) def oval(self):", "if self.__filled: self.create_oval(10, 10, width - 10, height - 10, fill = \"red\")", "- 10, fill = \"red\") else: self.create_rectangle(10, 10, width - 10, height -", "self.line() elif self.__figureType == \"rectangle\": self.rectangle() elif self.__figureType == \"oval\": self.oval() elif self.__figureType", "filled = False, width = 100, height = 100): super().__init__(container, width = width,", "self.rectangle() elif self.__figureType == \"oval\": self.oval() elif self.__figureType == \"arc\": self.arc() def line(self):", "- 10) def oval(self): width = int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled: self.create_oval(10,", "10) def arc(self): width = int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled: self.create_arc(10, 10,", "self.__filled = filled self.drawFigure() def getFigureType(self): return self.__figureType def getFilled(self): return self.__filled def", "height - 10) def rectangle(self): width = int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled:", "oval(self): width = int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled: self.create_oval(10, 10, width -", "10, height - 10) def arc(self): width = int(self[\"width\"]) height = int(self[\"height\"]) if", "145, fill = \"red\") else: self.create_arc(10, 10, width - 10, height - 10,", 
"int(self[\"height\"]) if self.__filled: self.create_arc(10, 10, width - 10, height - 10, start =", "elif self.__figureType == \"arc\": self.arc() def line(self): width = int(self[\"width\"]) height = int(self[\"height\"])", "height - 10, start = 0, extent = 145, fill = \"red\") else:", "10) def rectangle(self): width = int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled: self.create_rectangle(10, 10,", "10, fill = \"red\") else: self.create_rectangle(10, 10, width - 10, height - 10)", "width - 10, height - 10) def oval(self): width = int(self[\"width\"]) height =", "drawFigure(self): if self.__figureType == \"line\": self.line() elif self.__figureType == \"rectangle\": self.rectangle() elif self.__figureType", "super().__init__(container, width = width, height = height) self.__figureType = figureType self.__filled = filled", "return self.__figureType def getFilled(self): return self.__filled def setFigureType(self, figureType): self.__figureType = figureType self.drawFigure()", "self.drawFigure() def drawFigure(self): if self.__figureType == \"line\": self.line() elif self.__figureType == \"rectangle\": self.rectangle()", "- 10, height - 10) self.create_line(width - 10, 10, 10, height - 10)", "int(self[\"width\"]) height = int(self[\"height\"]) self.create_line(10, 10, width - 10, height - 10) self.create_line(width", "== \"arc\": self.arc() def line(self): width = int(self[\"width\"]) height = int(self[\"height\"]) self.create_line(10, 10,", "10) self.create_line(width - 10, 10, 10, height - 10) def rectangle(self): width =", "self.create_rectangle(10, 10, width - 10, height - 10, fill = \"red\") else: self.create_rectangle(10,", "10, fill = \"red\") else: self.create_oval(10, 10, width - 10, height - 10)", "- 10, height - 10) def arc(self): width = int(self[\"width\"]) height = int(self[\"height\"])", "self.__filled = filled self.drawFigure() def drawFigure(self): if self.__figureType == \"line\": self.line() elif self.__figureType", 
"self.create_oval(10, 10, width - 10, height - 10) def arc(self): width = int(self[\"width\"])", "- 10, start = 0, extent = 145, fill = \"red\") else: self.create_arc(10,", "int(self[\"height\"]) if self.__filled: self.create_oval(10, 10, width - 10, height - 10, fill =", "= width, height = height) self.__figureType = figureType self.__filled = filled self.drawFigure() def", "setFilled(self, filled): self.__filled = filled self.drawFigure() def drawFigure(self): if self.__figureType == \"line\": self.line()", "def drawFigure(self): if self.__figureType == \"line\": self.line() elif self.__figureType == \"rectangle\": self.rectangle() elif", "class FigureCanvas(Canvas): def __init__(self, container, figureType, filled = False, width = 100, height", "- 10) def rectangle(self): width = int(self[\"width\"]) height = int(self[\"height\"]) if self.__filled: self.create_rectangle(10,", "10, width - 10, height - 10, fill = \"red\") else: self.create_oval(10, 10,", "tkinter class FigureCanvas(Canvas): def __init__(self, container, figureType, filled = False, width = 100," ]
[ "are watching.\"\"\" raise Exception('get_notes must be overridden') class SNTFileHandler(FileHandlerInterface): \"\"\"StickyNotes.snt file handler\"\"\" snt_file", "watchdog.events.EVENT_TYPE_MODIFIED: return True elif event.event_type == watchdog.events.EVENT_TYPE_DELETED: debug(self.sync_engine.sticky_notes_file_path + ' was unexpectedly deleted',", "BOM to unicode with open(self.sync_engine.sticky_notes_file_path, 'rb') as sidebar_config_file: sidebar_config_file_content = sidebar_config_file.read() sidebar_config_file_content =", "note_text_rtf_file = '0' # RTF content of the note with self.snt_file.openstream([note_id, note_text_rtf_file]) as", "uri=True) self.database.row_factory = sqlite3.Row notes_in_db = self.database.execute('SELECT Text, Theme FROM Note') notes =", "return True elif event.event_type == watchdog.events.EVENT_TYPE_DELETED: debug(self.sync_engine.sticky_notes_file_path + ' was unexpectedly deleted', err=True,", "super().__init__(ignore_directories=True, patterns=patterns) def is_valid_event(self, event): \"\"\"Check if event is a valid event to", "(UCS-2) LE with BOM to unicode with open(self.sync_engine.sticky_notes_file_path, 'rb') as sidebar_config_file: sidebar_config_file_content =", "masquerade to decode the ugly file content from UTF-16 (UCS-2) LE with BOM", "decode the ugly file content from UTF-16 (UCS-2) LE with BOM to unicode", "from utils import debug from urllib.parse import unquote from rtf.Rtf2Markdown import getMarkdown import", "import PatternMatchingEventHandler from utils import debug from urllib.parse import unquote from rtf.Rtf2Markdown import", "' + event.event_type, err=True) return False def on_any_event(self, event): if not self.is_valid_event(event): pass", "PatternMatchingEventHandler from utils import debug from urllib.parse import unquote from rtf.Rtf2Markdown import getMarkdown", "regarding the filetype we are watching.\"\"\" raise Exception('get_notes must be overridden') class 
SNTFileHandler(FileHandlerInterface):", "terminate=True) elif event.event_type == watchdog.events.EVENT_TYPE_MOVED: debug(self.sync_engine.sticky_notes_file_path + ' was unexpectedly moved to '", "from urllib.parse import unquote from rtf.Rtf2Markdown import getMarkdown import watchdog.events import olefile import", "not olefile.isOleFile(sync_engine.sticky_notes_file_path): debug(sync_engine.sticky_notes_file_path + ' isn\\'t a valid Sticky Notes file', err=True, terminate=True)", "sync_engine=sync_engine) def get_notes(self): notes = [] self.snt_file = olefile.OleFileIO(self.sync_engine.sticky_notes_file_path) for storage in self.snt_file.listdir(storages=True,", "sync_engine): super().__init__(patterns=['*.ini'], sync_engine=sync_engine) def get_notes(self): notes = [] # This masquerade to decode", "the ugly file content from UTF-16 (UCS-2) LE with BOM to unicode with", "err=True, terminate=True) elif event.event_type == watchdog.events.EVENT_TYPE_MOVED: debug(self.sync_engine.sticky_notes_file_path + ' was unexpectedly moved to", "storage[0] # UUID-like string representing the note ID note_text_rtf_file = '0' # RTF", "debug from urllib.parse import unquote from rtf.Rtf2Markdown import getMarkdown import watchdog.events import olefile", "of the note with self.snt_file.openstream([note_id, note_text_rtf_file]) as note_content: note_text_rtf = note_content.read().decode('unicode') notes.append({'text': getMarkdown(note_text_rtf),", "sync_engine = None idle_timeout = None def __init__(self, sync_engine, patterns=None): self.sync_engine = sync_engine", "+ event.event_type, err=True) return False def on_any_event(self, event): if not self.is_valid_event(event): pass #", "self.sync_engine.sticky_notes_file_path + '?mode=ro', uri=True) self.database.row_factory = sqlite3.Row notes_in_db = self.database.execute('SELECT Text, Theme FROM", "utils import debug from urllib.parse import unquote from rtf.Rtf2Markdown import getMarkdown import watchdog.events", 
"super().__init__(patterns=['*.snt'], sync_engine=sync_engine) def get_notes(self): notes = [] self.snt_file = olefile.OleFileIO(self.sync_engine.sticky_notes_file_path) for storage in", "idle_timeout = None def __init__(self, sync_engine, patterns=None): self.sync_engine = sync_engine super().__init__(ignore_directories=True, patterns=patterns) def", "def get_note_color(self, note): return self.colors_map[note['color']] if note['color'] in self.colors_map else None class INIFileHandler(FileHandlerInterface):", "import getMarkdown import watchdog.events import olefile import sqlite3 import configparser import codecs import", "args=[self.get_notes()]) self.idle_timeout.start() def get_notes(self): \"\"\"Must be overridden to return a list of notes", "\"\"\"Check if event is a valid event to be proceesed by the file", "notes = [{'text': getMarkdown(note['Text']), 'color': self.get_note_color(note['Theme'])} for note in notes_in_db] self.database.close() return notes", "in self.sidebar_config.sections(): if not section.startswith('Section '): continue if 'NoteCount' not in self.sidebar_config[section]: continue", "INIFileHandler(FileHandlerInterface): \"\"\"Settings.ini file handler\"\"\" sidebar_config = None def __init__(self, sync_engine): super().__init__(patterns=['*.ini'], sync_engine=sync_engine) def", "self.snt_file.listdir(storages=True, streams=False): note_id = storage[0] # UUID-like string representing the note ID note_text_rtf_file", "self.database.row_factory = sqlite3.Row notes_in_db = self.database.execute('SELECT Text, Theme FROM Note') notes = [{'text':", "as sidebar_config_file: sidebar_config_file_content = sidebar_config_file.read() sidebar_config_file_content = sidebar_config_file_content[len(codecs.BOM_UTF16_LE):] # Remove the BOM self.sidebar_config", "to return a list of notes regarding the filetype we are watching.\"\"\" raise", "interpolation=None) self.sidebar_config.read_string(sidebar_config_file_content.decode('utf-16-le')) notes_color = 
None for section in self.sidebar_config.sections(): if not section.startswith('Section '):", "+ '?mode=ro', uri=True) self.database.row_factory = sqlite3.Row notes_in_db = self.database.execute('SELECT Text, Theme FROM Note')", "file', err=True, terminate=True) super().__init__(patterns=['*.snt'], sync_engine=sync_engine) def get_notes(self): notes = [] self.snt_file = olefile.OleFileIO(self.sync_engine.sticky_notes_file_path)", "self.sidebar_config = configparser.ConfigParser(delimiters=('='), interpolation=None) self.sidebar_config.read_string(sidebar_config_file_content.decode('utf-16-le')) notes_color = None for section in self.sidebar_config.sections(): if", "sync_engine): super().__init__(patterns=['*.sqlite'], sync_engine=sync_engine) def get_notes(self): self.database = sqlite3.connect('file:' + self.sync_engine.sticky_notes_file_path + '?mode=ro', uri=True)", "return False if event.event_type == watchdog.events.EVENT_TYPE_MODIFIED: return True elif event.event_type == watchdog.events.EVENT_TYPE_DELETED: debug(self.sync_engine.sticky_notes_file_path", "Notes file', err=True, terminate=True) super().__init__(patterns=['*.snt'], sync_engine=sync_engine) def get_notes(self): notes = [] self.snt_file =", "def get_notes(self): notes = [] # This masquerade to decode the ugly file", "= sidebar_config_file_content[len(codecs.BOM_UTF16_LE):] # Remove the BOM self.sidebar_config = configparser.ConfigParser(delimiters=('='), interpolation=None) self.sidebar_config.read_string(sidebar_config_file_content.decode('utf-16-le')) notes_color =", "'Yellow': 'yellow', 'Green': 'green', 'Blue': 'blue', 'Purple': 'purple', 'Pink': 'pink' } database =", "terminate=True) else: debug('Unhandled event type: ' + event.event_type, err=True) return False def on_any_event(self,", "return self.colors_map[note['color']] if note['color'] in self.colors_map else None class INIFileHandler(FileHandlerInterface): \"\"\"Settings.ini file handler\"\"\"", "storage in 
self.snt_file.listdir(storages=True, streams=False): note_id = storage[0] # UUID-like string representing the note", "self.sidebar_config.read_string(sidebar_config_file_content.decode('utf-16-le')) notes_color = None for section in self.sidebar_config.sections(): if not section.startswith('Section '): continue", "watching.\"\"\" raise Exception('get_notes must be overridden') class SNTFileHandler(FileHandlerInterface): \"\"\"StickyNotes.snt file handler\"\"\" snt_file =", "True elif event.event_type == watchdog.events.EVENT_TYPE_DELETED: debug(self.sync_engine.sticky_notes_file_path + ' was unexpectedly deleted', err=True, terminate=True)", "watchdog.events.EVENT_TYPE_MOVED: debug(self.sync_engine.sticky_notes_file_path + ' was unexpectedly moved to ' + event.dest_path, err=True, terminate=True)", "if event.event_type == watchdog.events.EVENT_TYPE_MODIFIED: return True elif event.event_type == watchdog.events.EVENT_TYPE_DELETED: debug(self.sync_engine.sticky_notes_file_path + '", "= note_content.read().decode('unicode') notes.append({'text': getMarkdown(note_text_rtf), 'color': None}) self.snt_file.close() return notes class SQLiteFileHandler(FileHandlerInterface): \"\"\"plum.sqlite file", "def __init__(self, sync_engine): if not olefile.isOleFile(sync_engine.sticky_notes_file_path): debug(sync_engine.sticky_notes_file_path + ' isn\\'t a valid Sticky", "= None def __init__(self, sync_engine, patterns=None): self.sync_engine = sync_engine super().__init__(ignore_directories=True, patterns=patterns) def is_valid_event(self,", "= None def __init__(self, sync_engine): if not olefile.isOleFile(sync_engine.sticky_notes_file_path): debug(sync_engine.sticky_notes_file_path + ' isn\\'t a", "if event is a valid event to be proceesed by the file handler.\"\"\"", "else: debug('Unhandled event type: ' + event.event_type, err=True) return False def on_any_event(self, event):", "snt_file = None def __init__(self, sync_engine): if not 
olefile.isOleFile(sync_engine.sticky_notes_file_path): debug(sync_engine.sticky_notes_file_path + ' isn\\'t", "file handler\"\"\" sidebar_config = None def __init__(self, sync_engine): super().__init__(patterns=['*.ini'], sync_engine=sync_engine) def get_notes(self): notes", "configparser import codecs import threading class FileHandlerInterface(PatternMatchingEventHandler): \"\"\"Base class for all the Sticky", "import sqlite3 import configparser import codecs import threading class FileHandlerInterface(PatternMatchingEventHandler): \"\"\"Base class for", "file content from UTF-16 (UCS-2) LE with BOM to unicode with open(self.sync_engine.sticky_notes_file_path, 'rb')", "return False def on_any_event(self, event): if not self.is_valid_event(event): pass # Restart the idle", "None idle_timeout = None def __init__(self, sync_engine, patterns=None): self.sync_engine = sync_engine super().__init__(ignore_directories=True, patterns=patterns)", "is None else None for key in self.sidebar_config[section]: if key.isdigit(): notes.append({'text': unquote(self.sidebar_config[section][key].strip('\"')), 'color':", "codecs import threading class FileHandlerInterface(PatternMatchingEventHandler): \"\"\"Base class for all the Sticky Notes file", "def get_notes(self): self.database = sqlite3.connect('file:' + self.sync_engine.sticky_notes_file_path + '?mode=ro', uri=True) self.database.row_factory = sqlite3.Row", "string representing the note ID note_text_rtf_file = '0' # RTF content of the", "not self.is_valid_event(event): pass # Restart the idle timeout if self.idle_timeout: self.idle_timeout.cancel() self.idle_timeout =", "the note ID note_text_rtf_file = '0' # RTF content of the note with", "file handler\"\"\" snt_file = None def __init__(self, sync_engine): if not olefile.isOleFile(sync_engine.sticky_notes_file_path): debug(sync_engine.sticky_notes_file_path +", "handler\"\"\" snt_file = None def __init__(self, sync_engine): if not 
from watchdog.events import PatternMatchingEventHandler
from utils import debug
from urllib.parse import unquote
from rtf.Rtf2Markdown import getMarkdown
import watchdog.events
import olefile
import sqlite3
import configparser
import codecs
import threading


class FileHandlerInterface(PatternMatchingEventHandler):
    """Base class for all the Sticky Notes file handlers.

    Watches a single Sticky Notes data file and debounces filesystem events:
    5 seconds after the last relevant modification, the notes are re-read and
    handed to the sync engine.
    """

    sync_engine = None   # object exposing sticky_notes_file_path and sync_notes()
    idle_timeout = None  # pending threading.Timer used to debounce event bursts

    def __init__(self, sync_engine, patterns=None):
        self.sync_engine = sync_engine
        super().__init__(ignore_directories=True, patterns=patterns)

    def is_valid_event(self, event):
        """Check if event is a valid event to be processed by the file handler.

        Returns True only for a modification of the watched file. A deletion or
        a move of the watched file aborts the program (debug() with
        terminate=True); anything else is reported and ignored.
        """
        if self.sync_engine.sticky_notes_file_path != event.src_path:
            return False

        if event.event_type == watchdog.events.EVENT_TYPE_MODIFIED:
            return True
        elif event.event_type == watchdog.events.EVENT_TYPE_DELETED:
            debug(self.sync_engine.sticky_notes_file_path + ' was unexpectedly deleted', err=True, terminate=True)
        elif event.event_type == watchdog.events.EVENT_TYPE_MOVED:
            debug(self.sync_engine.sticky_notes_file_path + ' was unexpectedly moved to ' + event.dest_path, err=True, terminate=True)
        else:
            debug('Unhandled event type: ' + event.event_type, err=True)

        return False

    def on_any_event(self, event):
        """Debounce valid events: (re)arm a 5 second timer that triggers a sync."""
        if not self.is_valid_event(event):
            # BUG FIX: was `pass`, which fell through and let unrelated events
            # restart the timer and trigger spurious syncs.
            return

        # Restart the idle timeout
        if self.idle_timeout:
            self.idle_timeout.cancel()

        self.idle_timeout = threading.Timer(5.0, self.sync_engine.sync_notes, args=[self.get_notes()])
        self.idle_timeout.start()

    def get_notes(self):
        """Must be overridden to return a list of notes regarding the filetype we are watching."""
        raise NotImplementedError('get_notes must be overridden')


class SNTFileHandler(FileHandlerInterface):
    """StickyNotes.snt file handler (legacy OLE container format)."""

    snt_file = None  # olefile.OleFileIO handle, open only while get_notes() runs

    def __init__(self, sync_engine):
        if not olefile.isOleFile(sync_engine.sticky_notes_file_path):
            debug(sync_engine.sticky_notes_file_path + ' isn\'t a valid Sticky Notes file', err=True, terminate=True)

        super().__init__(patterns=['*.snt'], sync_engine=sync_engine)

    def get_notes(self):
        """Return every note in the .snt file as {'text': <markdown>, 'color': None}."""
        notes = []

        self.snt_file = olefile.OleFileIO(self.sync_engine.sticky_notes_file_path)

        for storage in self.snt_file.listdir(storages=True, streams=False):
            note_id = storage[0]  # UUID-like string representing the note ID
            note_text_rtf_file = '0'  # RTF content of the note

            with self.snt_file.openstream([note_id, note_text_rtf_file]) as note_content:
                # BUG FIX: 'unicode' is not a Python 3 codec name and raised
                # LookupError for every note. RTF payloads are 7-bit ASCII with
                # escape sequences, so latin-1 decodes them losslessly.
                note_text_rtf = note_content.read().decode('latin-1')

            notes.append({'text': getMarkdown(note_text_rtf), 'color': None})

        self.snt_file.close()

        return notes


class SQLiteFileHandler(FileHandlerInterface):
    """plum.sqlite file handler (Windows 10 Sticky Notes)."""

    # Maps the Theme value stored in the DB to our internal color names.
    colors_map = {
        'Yellow': 'yellow',
        'Green': 'green',
        'Blue': 'blue',
        'Purple': 'purple',
        'Pink': 'pink'
    }

    database = None  # sqlite3.Connection, open only while get_notes() runs

    def __init__(self, sync_engine):
        super().__init__(patterns=['*.sqlite'], sync_engine=sync_engine)

    def get_notes(self):
        """Return every note in the DB as {'text': <markdown>, 'color': <name or None>}."""
        # Open read-only (URI mode) so we never interfere with the Sticky Notes app.
        self.database = sqlite3.connect('file:' + self.sync_engine.sticky_notes_file_path + '?mode=ro', uri=True)
        self.database.row_factory = sqlite3.Row

        notes_in_db = self.database.execute('SELECT Text, Theme FROM Note')
        notes = [{'text': getMarkdown(note['Text']), 'color': self.get_note_color(note['Theme'])} for note in notes_in_db]

        self.database.close()

        return notes

    def get_note_color(self, note):
        """Translate a Theme value (e.g. 'Yellow') to a color name, or None if unknown.

        BUG FIX: the argument is the Theme *string* passed by get_notes(), not a
        note dict — the old body indexed it with ['color'], raising TypeError
        for every note.
        """
        return self.colors_map.get(note)


class INIFileHandler(FileHandlerInterface):
    """Settings.ini file handler (Sticky Notes sidebar gadget)."""

    sidebar_config = None  # parsed Settings.ini content

    def __init__(self, sync_engine):
        super().__init__(patterns=['*.ini'], sync_engine=sync_engine)

    def get_notes(self):
        """Return the notes stored in the sidebar gadget Settings.ini file."""
        notes = []

        # This masquerade to decode the ugly file content from UTF-16 (UCS-2) LE with BOM to unicode
        with open(self.sync_engine.sticky_notes_file_path, 'rb') as sidebar_config_file:
            sidebar_config_file_content = sidebar_config_file.read()

        sidebar_config_file_content = sidebar_config_file_content[len(codecs.BOM_UTF16_LE):]  # Remove the BOM

        # ('=',): explicit one-element tuple (the bare ('=') parenthesized string
        # behaved identically, but read as a mistake).
        self.sidebar_config = configparser.ConfigParser(delimiters=('=',), interpolation=None)
        self.sidebar_config.read_string(sidebar_config_file_content.decode('utf-16-le'))

        notes_color = None

        for section in self.sidebar_config.sections():
            if not section.startswith('Section '):
                continue

            if 'NoteCount' not in self.sidebar_config[section]:
                continue

            # NOTE(review): only the first section carrying ColorSaved yields a
            # color; every later section resets notes_color to None. Preserved
            # as-is — confirm against a real Settings.ini before changing.
            notes_color = self.sidebar_config[section]['ColorSaved'].strip('"') if 'ColorSaved' in self.sidebar_config[
                section] and notes_color is None else None

            for key in self.sidebar_config[section]:
                if key.isdigit():
                    # Numeric keys hold the note text, URL-encoded and wrapped
                    # in double quotes.
                    notes.append({'text': unquote(self.sidebar_config[section][key].strip('"')), 'color': notes_color})
                    break  # only the first numeric key holds the note text

        return notes
streams=False): note_id = storage[0] # UUID-like string representing the", "= [] # This masquerade to decode the ugly file content from UTF-16", "\"\"\"Base class for all the Sticky Notes file handlers.\"\"\" sync_engine = None idle_timeout", "Theme FROM Note') notes = [{'text': getMarkdown(note['Text']), 'color': self.get_note_color(note['Theme'])} for note in notes_in_db]", "notes_color = None for section in self.sidebar_config.sections(): if not section.startswith('Section '): continue if", "not section.startswith('Section '): continue if 'NoteCount' not in self.sidebar_config[section]: continue notes_color = self.sidebar_config[section]['ColorSaved'].strip('\"')", "'blue', 'Purple': 'purple', 'Pink': 'pink' } database = None def __init__(self, sync_engine): super().__init__(patterns=['*.sqlite'],", "+ ' was unexpectedly deleted', err=True, terminate=True) elif event.event_type == watchdog.events.EVENT_TYPE_MOVED: debug(self.sync_engine.sticky_notes_file_path +", "sync_engine=sync_engine) def get_notes(self): self.database = sqlite3.connect('file:' + self.sync_engine.sticky_notes_file_path + '?mode=ro', uri=True) self.database.row_factory =", "err=True, terminate=True) super().__init__(patterns=['*.snt'], sync_engine=sync_engine) def get_notes(self): notes = [] self.snt_file = olefile.OleFileIO(self.sync_engine.sticky_notes_file_path) for", "for all the Sticky Notes file handlers.\"\"\" sync_engine = None idle_timeout = None", "representing the note ID note_text_rtf_file = '0' # RTF content of the note", "event.event_type == watchdog.events.EVENT_TYPE_DELETED: debug(self.sync_engine.sticky_notes_file_path + ' was unexpectedly deleted', err=True, terminate=True) elif event.event_type", "'pink' } database = None def __init__(self, sync_engine): super().__init__(patterns=['*.sqlite'], sync_engine=sync_engine) def get_notes(self): self.database", "if 'NoteCount' not in self.sidebar_config[section]: continue notes_color = 
self.sidebar_config[section]['ColorSaved'].strip('\"') if 'ColorSaved' in self.sidebar_config[", "'Blue': 'blue', 'Purple': 'purple', 'Pink': 'pink' } database = None def __init__(self, sync_engine):", "import watchdog.events import olefile import sqlite3 import configparser import codecs import threading class", "ugly file content from UTF-16 (UCS-2) LE with BOM to unicode with open(self.sync_engine.sticky_notes_file_path,", "[] # This masquerade to decode the ugly file content from UTF-16 (UCS-2)", "section] and notes_color is None else None for key in self.sidebar_config[section]: if key.isdigit():", "self.idle_timeout: self.idle_timeout.cancel() self.idle_timeout = threading.Timer(5.0, self.sync_engine.sync_notes, args=[self.get_notes()]) self.idle_timeout.start() def get_notes(self): \"\"\"Must be overridden", "the BOM self.sidebar_config = configparser.ConfigParser(delimiters=('='), interpolation=None) self.sidebar_config.read_string(sidebar_config_file_content.decode('utf-16-le')) notes_color = None for section in", "= None for section in self.sidebar_config.sections(): if not section.startswith('Section '): continue if 'NoteCount'", "be overridden') class SNTFileHandler(FileHandlerInterface): \"\"\"StickyNotes.snt file handler\"\"\" snt_file = None def __init__(self, sync_engine):", "self.idle_timeout.cancel() self.idle_timeout = threading.Timer(5.0, self.sync_engine.sync_notes, args=[self.get_notes()]) self.idle_timeout.start() def get_notes(self): \"\"\"Must be overridden to", "import olefile import sqlite3 import configparser import codecs import threading class FileHandlerInterface(PatternMatchingEventHandler): \"\"\"Base", "a valid event to be proceesed by the file handler.\"\"\" if self.sync_engine.sticky_notes_file_path !=", "moved to ' + event.dest_path, err=True, terminate=True) else: debug('Unhandled event type: ' +", "if note['color'] in self.colors_map else None class INIFileHandler(FileHandlerInterface): \"\"\"Settings.ini file handler\"\"\" 
sidebar_config =", "None def __init__(self, sync_engine, patterns=None): self.sync_engine = sync_engine super().__init__(ignore_directories=True, patterns=patterns) def is_valid_event(self, event):", "'NoteCount' not in self.sidebar_config[section]: continue notes_color = self.sidebar_config[section]['ColorSaved'].strip('\"') if 'ColorSaved' in self.sidebar_config[ section]", "notes_color is None else None for key in self.sidebar_config[section]: if key.isdigit(): notes.append({'text': unquote(self.sidebar_config[section][key].strip('\"')),", "else None for key in self.sidebar_config[section]: if key.isdigit(): notes.append({'text': unquote(self.sidebar_config[section][key].strip('\"')), 'color': notes_color}) break", "'purple', 'Pink': 'pink' } database = None def __init__(self, sync_engine): super().__init__(patterns=['*.sqlite'], sync_engine=sync_engine) def", "content of the note with self.snt_file.openstream([note_id, note_text_rtf_file]) as note_content: note_text_rtf = note_content.read().decode('unicode') notes.append({'text':", "watchdog.events.EVENT_TYPE_DELETED: debug(self.sync_engine.sticky_notes_file_path + ' was unexpectedly deleted', err=True, terminate=True) elif event.event_type == watchdog.events.EVENT_TYPE_MOVED:", "Restart the idle timeout if self.idle_timeout: self.idle_timeout.cancel() self.idle_timeout = threading.Timer(5.0, self.sync_engine.sync_notes, args=[self.get_notes()]) self.idle_timeout.start()", "\"\"\"Settings.ini file handler\"\"\" sidebar_config = None def __init__(self, sync_engine): super().__init__(patterns=['*.ini'], sync_engine=sync_engine) def get_notes(self):", "we are watching.\"\"\" raise Exception('get_notes must be overridden') class SNTFileHandler(FileHandlerInterface): \"\"\"StickyNotes.snt file handler\"\"\"", "None class INIFileHandler(FileHandlerInterface): \"\"\"Settings.ini file handler\"\"\" sidebar_config = None def __init__(self, sync_engine): super().__init__(patterns=['*.ini'],", "patterns=patterns) def 
is_valid_event(self, event): \"\"\"Check if event is a valid event to be", "not in self.sidebar_config[section]: continue notes_color = self.sidebar_config[section]['ColorSaved'].strip('\"') if 'ColorSaved' in self.sidebar_config[ section] and", "was unexpectedly moved to ' + event.dest_path, err=True, terminate=True) else: debug('Unhandled event type:", "sqlite3.connect('file:' + self.sync_engine.sticky_notes_file_path + '?mode=ro', uri=True) self.database.row_factory = sqlite3.Row notes_in_db = self.database.execute('SELECT Text,", "sync_engine super().__init__(ignore_directories=True, patterns=patterns) def is_valid_event(self, event): \"\"\"Check if event is a valid event", "event is a valid event to be proceesed by the file handler.\"\"\" if", "note ID note_text_rtf_file = '0' # RTF content of the note with self.snt_file.openstream([note_id,", "Remove the BOM self.sidebar_config = configparser.ConfigParser(delimiters=('='), interpolation=None) self.sidebar_config.read_string(sidebar_config_file_content.decode('utf-16-le')) notes_color = None for section", "def __init__(self, sync_engine, patterns=None): self.sync_engine = sync_engine super().__init__(ignore_directories=True, patterns=patterns) def is_valid_event(self, event): \"\"\"Check", "to be proceesed by the file handler.\"\"\" if self.sync_engine.sticky_notes_file_path != event.src_path: return False", "self.database.execute('SELECT Text, Theme FROM Note') notes = [{'text': getMarkdown(note['Text']), 'color': self.get_note_color(note['Theme'])} for note", "' was unexpectedly moved to ' + event.dest_path, err=True, terminate=True) else: debug('Unhandled event" ]
[ "f\"_{func.__name__}\" @wraps(func) def inner(self, *args, **kwargs): if hasattr(self, cache_name): return getattr(self, cache_name) val", "cache_name = f\"_{func.__name__}\" @wraps(func) def inner(self, *args, **kwargs): if hasattr(self, cache_name): return getattr(self,", "return val return class AuditModel(models.Model): \"\"\"A common audit model for tracking\"\"\" created_at =", "cached_attribute(func): cache_name = f\"_{func.__name__}\" @wraps(func) def inner(self, *args, **kwargs): if hasattr(self, cache_name): return", "def inner(self, *args, **kwargs): if hasattr(self, cache_name): return getattr(self, cache_name) val = func(self,", "All Utils used on this package module live here \"\"\" from django.db import", "@wraps(func) def inner(self, *args, **kwargs): if hasattr(self, cache_name): return getattr(self, cache_name) val =", "getattr(self, cache_name) val = func(self, *args, **kwargs) setattr(self, cache_name, val) return val return", "<gh_stars>0 \"\"\" All Utils used on this package module live here \"\"\" from", "def cached_attribute(func): cache_name = f\"_{func.__name__}\" @wraps(func) def inner(self, *args, **kwargs): if hasattr(self, cache_name):", "return class AuditModel(models.Model): \"\"\"A common audit model for tracking\"\"\" created_at = models.DateTimeField(null=False, blank=False,", "cache_name): return getattr(self, cache_name) val = func(self, *args, **kwargs) setattr(self, cache_name, val) return", "func(self, *args, **kwargs) setattr(self, cache_name, val) return val return class AuditModel(models.Model): \"\"\"A common", "= func(self, *args, **kwargs) setattr(self, cache_name, val) return val return class AuditModel(models.Model): \"\"\"A", "if hasattr(self, cache_name): return getattr(self, cache_name) val = func(self, *args, **kwargs) setattr(self, cache_name,", "*args, **kwargs) setattr(self, cache_name, val) return val return class AuditModel(models.Model): \"\"\"A common audit", "\"\"\"A common audit model for tracking\"\"\" 
created_at = models.DateTimeField(null=False, blank=False, auto_now_add=True) modified_at =", "import wraps def cached_attribute(func): cache_name = f\"_{func.__name__}\" @wraps(func) def inner(self, *args, **kwargs): if", "django.db import models from functools import wraps def cached_attribute(func): cache_name = f\"_{func.__name__}\" @wraps(func)", "return getattr(self, cache_name) val = func(self, *args, **kwargs) setattr(self, cache_name, val) return val", "AuditModel(models.Model): \"\"\"A common audit model for tracking\"\"\" created_at = models.DateTimeField(null=False, blank=False, auto_now_add=True) modified_at", "module live here \"\"\" from django.db import models from functools import wraps def", "common audit model for tracking\"\"\" created_at = models.DateTimeField(null=False, blank=False, auto_now_add=True) modified_at = models.DateTimeField(null=False,", "models from functools import wraps def cached_attribute(func): cache_name = f\"_{func.__name__}\" @wraps(func) def inner(self,", "**kwargs) setattr(self, cache_name, val) return val return class AuditModel(models.Model): \"\"\"A common audit model", "val) return val return class AuditModel(models.Model): \"\"\"A common audit model for tracking\"\"\" created_at", "*args, **kwargs): if hasattr(self, cache_name): return getattr(self, cache_name) val = func(self, *args, **kwargs)", "= f\"_{func.__name__}\" @wraps(func) def inner(self, *args, **kwargs): if hasattr(self, cache_name): return getattr(self, cache_name)", "live here \"\"\" from django.db import models from functools import wraps def cached_attribute(func):", "val return class AuditModel(models.Model): \"\"\"A common audit model for tracking\"\"\" created_at = models.DateTimeField(null=False,", "cache_name) val = func(self, *args, **kwargs) setattr(self, cache_name, val) return val return class", "class AuditModel(models.Model): \"\"\"A common audit model for tracking\"\"\" created_at = models.DateTimeField(null=False, blank=False, 
auto_now_add=True)", "model for tracking\"\"\" created_at = models.DateTimeField(null=False, blank=False, auto_now_add=True) modified_at = models.DateTimeField(null=False, blank=False, auto_now=True)", "cache_name, val) return val return class AuditModel(models.Model): \"\"\"A common audit model for tracking\"\"\"", "\"\"\" All Utils used on this package module live here \"\"\" from django.db", "here \"\"\" from django.db import models from functools import wraps def cached_attribute(func): cache_name", "setattr(self, cache_name, val) return val return class AuditModel(models.Model): \"\"\"A common audit model for", "inner(self, *args, **kwargs): if hasattr(self, cache_name): return getattr(self, cache_name) val = func(self, *args,", "wraps def cached_attribute(func): cache_name = f\"_{func.__name__}\" @wraps(func) def inner(self, *args, **kwargs): if hasattr(self,", "import models from functools import wraps def cached_attribute(func): cache_name = f\"_{func.__name__}\" @wraps(func) def", "on this package module live here \"\"\" from django.db import models from functools", "**kwargs): if hasattr(self, cache_name): return getattr(self, cache_name) val = func(self, *args, **kwargs) setattr(self,", "used on this package module live here \"\"\" from django.db import models from", "Utils used on this package module live here \"\"\" from django.db import models", "\"\"\" from django.db import models from functools import wraps def cached_attribute(func): cache_name =", "package module live here \"\"\" from django.db import models from functools import wraps", "from django.db import models from functools import wraps def cached_attribute(func): cache_name = f\"_{func.__name__}\"", "hasattr(self, cache_name): return getattr(self, cache_name) val = func(self, *args, **kwargs) setattr(self, cache_name, val)", "audit model for tracking\"\"\" created_at = models.DateTimeField(null=False, blank=False, auto_now_add=True) modified_at = models.DateTimeField(null=False, 
blank=False,", "functools import wraps def cached_attribute(func): cache_name = f\"_{func.__name__}\" @wraps(func) def inner(self, *args, **kwargs):", "this package module live here \"\"\" from django.db import models from functools import", "val = func(self, *args, **kwargs) setattr(self, cache_name, val) return val return class AuditModel(models.Model):", "from functools import wraps def cached_attribute(func): cache_name = f\"_{func.__name__}\" @wraps(func) def inner(self, *args," ]
[ "URLs rather than using an RSS feed or a sitemap. It needs the", "{urls}\") for url in urls: if url != '': yield Request(url, dont_filter=True) def", "import logging from scrapy.spiders import Spider from scrapy.http import Request logger = logging.getLogger(__name__)", "than using an RSS feed or a sitemap. It needs the # SPECIFIED_URIS_FILE", "RSS feed or a sitemap. It needs the # SPECIFIED_URIS_FILE setting set up", "urls = [u.strip() for u in f.readlines()] logger.debug(f\"URLs read from SPECIFIED_URL_FILE: {urls}\") for", "scrapy.spiders import Spider from scrapy.http import Request logger = logging.getLogger(__name__) # This spider", "urls: if url != '': yield Request(url, dont_filter=True) def parse(self, response): return self.parse_page(response)", "[u.strip() for u in f.readlines()] logger.debug(f\"URLs read from SPECIFIED_URL_FILE: {urls}\") for url in", "configured (e.g. in \" \"settings.py) to point to a file containing a \"", "REFETCHCONTROL_ENABLED to ' 'False in settings.py.') startfn = self.crawler.settings.get('SPECIFIED_URLS_FILE') if not startfn: logger.critical(\"SPECIFIED_URLS_FILE", "Spider from scrapy.http import Request logger = logging.getLogger(__name__) # This spider is a", "if self.crawler.settings.get('REFETCHCONTROL_ENABLED') == True: logger.warning('RefetchControl is incompatible with ' 'NewsSpecifiedSpider and will give", "= [] def start_requests(self): if self.crawler.settings.get('REFETCHCONTROL_ENABLED') == True: logger.warning('RefetchControl is incompatible with '", "[] def start_requests(self): if self.crawler.settings.get('REFETCHCONTROL_ENABLED') == True: logger.warning('RefetchControl is incompatible with ' 'NewsSpecifiedSpider", "' 'NewsSpecifiedSpider and will give spurious ' 'warnings. 
Try setting REFETCHCONTROL_ENABLED to '", "those attempting to crawl and parse a specified # list of URLs rather", "'': yield Request(url, dont_filter=True) def parse(self, response): return self.parse_page(response) def parse_page(self, response): raise", "# -*- coding: utf-8 -*- import logging from scrapy.spiders import Spider from scrapy.http", "logging from scrapy.spiders import Spider from scrapy.http import Request logger = logging.getLogger(__name__) #", "not startfn: logger.critical(\"SPECIFIED_URLS_FILE must be configured (e.g. in \" \"settings.py) to point to", "from scrapy.spiders import Spider from scrapy.http import Request logger = logging.getLogger(__name__) # This", "to ' 'False in settings.py.') startfn = self.crawler.settings.get('SPECIFIED_URLS_FILE') if not startfn: logger.critical(\"SPECIFIED_URLS_FILE must", "spider is a base for those attempting to crawl and parse a specified", "logger.debug(f\"URLs read from SPECIFIED_URL_FILE: {urls}\") for url in urls: if url != '':", "SPECIFIED_URL_FILE: {urls}\") for url in urls: if url != '': yield Request(url, dont_filter=True)", "coding: utf-8 -*- import logging from scrapy.spiders import Spider from scrapy.http import Request", "self.start_urls: yield Request(url, dont_filter=True) with open(startfn, 'r') as f: urls = [u.strip() for", "with open(startfn, 'r') as f: urls = [u.strip() for u in f.readlines()] logger.debug(f\"URLs", "logging.getLogger(__name__) # This spider is a base for those attempting to crawl and", "point to a file containing a \" \"list of URLs.\") return for url", "file containing a \" \"list of URLs.\") return for url in self.start_urls: yield", "start_urls = [] def start_requests(self): if self.crawler.settings.get('REFETCHCONTROL_ENABLED') == True: logger.warning('RefetchControl is incompatible with", "set up to point to a file with a list of URLs. class", "give spurious ' 'warnings. 
Try setting REFETCHCONTROL_ENABLED to ' 'False in settings.py.') startfn", "up to point to a file with a list of URLs. class NewsSpecifiedSpider(Spider):", "if not startfn: logger.critical(\"SPECIFIED_URLS_FILE must be configured (e.g. in \" \"settings.py) to point", "self.crawler.settings.get('REFETCHCONTROL_ENABLED') == True: logger.warning('RefetchControl is incompatible with ' 'NewsSpecifiedSpider and will give spurious", "attempting to crawl and parse a specified # list of URLs rather than", "for url in self.start_urls: yield Request(url, dont_filter=True) with open(startfn, 'r') as f: urls", "f: urls = [u.strip() for u in f.readlines()] logger.debug(f\"URLs read from SPECIFIED_URL_FILE: {urls}\")", "a base for those attempting to crawl and parse a specified # list", "'warnings. Try setting REFETCHCONTROL_ENABLED to ' 'False in settings.py.') startfn = self.crawler.settings.get('SPECIFIED_URLS_FILE') if", "\" \"list of URLs.\") return for url in self.start_urls: yield Request(url, dont_filter=True) with", "f.readlines()] logger.debug(f\"URLs read from SPECIFIED_URL_FILE: {urls}\") for url in urls: if url !=", "' 'False in settings.py.') startfn = self.crawler.settings.get('SPECIFIED_URLS_FILE') if not startfn: logger.critical(\"SPECIFIED_URLS_FILE must be", "a \" \"list of URLs.\") return for url in self.start_urls: yield Request(url, dont_filter=True)", "to point to a file with a list of URLs. class NewsSpecifiedSpider(Spider): start_urls", "with a list of URLs. class NewsSpecifiedSpider(Spider): start_urls = [] def start_requests(self): if", "\"list of URLs.\") return for url in self.start_urls: yield Request(url, dont_filter=True) with open(startfn,", "yield Request(url, dont_filter=True) def parse(self, response): return self.parse_page(response) def parse_page(self, response): raise NotImplementedError", "a specified # list of URLs rather than using an RSS feed or", "point to a file with a list of URLs. 
class NewsSpecifiedSpider(Spider): start_urls =", "u in f.readlines()] logger.debug(f\"URLs read from SPECIFIED_URL_FILE: {urls}\") for url in urls: if", "-*- import logging from scrapy.spiders import Spider from scrapy.http import Request logger =", "a file with a list of URLs. class NewsSpecifiedSpider(Spider): start_urls = [] def", "a file containing a \" \"list of URLs.\") return for url in self.start_urls:", "'r') as f: urls = [u.strip() for u in f.readlines()] logger.debug(f\"URLs read from", "for url in urls: if url != '': yield Request(url, dont_filter=True) def parse(self,", "start_requests(self): if self.crawler.settings.get('REFETCHCONTROL_ENABLED') == True: logger.warning('RefetchControl is incompatible with ' 'NewsSpecifiedSpider and will", "!= '': yield Request(url, dont_filter=True) def parse(self, response): return self.parse_page(response) def parse_page(self, response):", "URLs.\") return for url in self.start_urls: yield Request(url, dont_filter=True) with open(startfn, 'r') as", "in self.start_urls: yield Request(url, dont_filter=True) with open(startfn, 'r') as f: urls = [u.strip()", "if url != '': yield Request(url, dont_filter=True) def parse(self, response): return self.parse_page(response) def", "sitemap. It needs the # SPECIFIED_URIS_FILE setting set up to point to a", "in f.readlines()] logger.debug(f\"URLs read from SPECIFIED_URL_FILE: {urls}\") for url in urls: if url", "with ' 'NewsSpecifiedSpider and will give spurious ' 'warnings. Try setting REFETCHCONTROL_ENABLED to", "\" \"settings.py) to point to a file containing a \" \"list of URLs.\")", "-*- coding: utf-8 -*- import logging from scrapy.spiders import Spider from scrapy.http import", "for those attempting to crawl and parse a specified # list of URLs", "be configured (e.g. 
in \" \"settings.py) to point to a file containing a", "in settings.py.') startfn = self.crawler.settings.get('SPECIFIED_URLS_FILE') if not startfn: logger.critical(\"SPECIFIED_URLS_FILE must be configured (e.g.", "startfn = self.crawler.settings.get('SPECIFIED_URLS_FILE') if not startfn: logger.critical(\"SPECIFIED_URLS_FILE must be configured (e.g. in \"", "\"settings.py) to point to a file containing a \" \"list of URLs.\") return", "Request(url, dont_filter=True) with open(startfn, 'r') as f: urls = [u.strip() for u in", "list of URLs. class NewsSpecifiedSpider(Spider): start_urls = [] def start_requests(self): if self.crawler.settings.get('REFETCHCONTROL_ENABLED') ==", "URLs. class NewsSpecifiedSpider(Spider): start_urls = [] def start_requests(self): if self.crawler.settings.get('REFETCHCONTROL_ENABLED') == True: logger.warning('RefetchControl", "a list of URLs. class NewsSpecifiedSpider(Spider): start_urls = [] def start_requests(self): if self.crawler.settings.get('REFETCHCONTROL_ENABLED')", "spurious ' 'warnings. Try setting REFETCHCONTROL_ENABLED to ' 'False in settings.py.') startfn =", "import Spider from scrapy.http import Request logger = logging.getLogger(__name__) # This spider is", "SPECIFIED_URIS_FILE setting set up to point to a file with a list of", "= logging.getLogger(__name__) # This spider is a base for those attempting to crawl", "open(startfn, 'r') as f: urls = [u.strip() for u in f.readlines()] logger.debug(f\"URLs read", "url in urls: if url != '': yield Request(url, dont_filter=True) def parse(self, response):", "a sitemap. 
It needs the # SPECIFIED_URIS_FILE setting set up to point to", "= [u.strip() for u in f.readlines()] logger.debug(f\"URLs read from SPECIFIED_URL_FILE: {urls}\") for url", "specified # list of URLs rather than using an RSS feed or a", "setting set up to point to a file with a list of URLs.", "crawl and parse a specified # list of URLs rather than using an", "setting REFETCHCONTROL_ENABLED to ' 'False in settings.py.') startfn = self.crawler.settings.get('SPECIFIED_URLS_FILE') if not startfn:", "is incompatible with ' 'NewsSpecifiedSpider and will give spurious ' 'warnings. Try setting", "to a file with a list of URLs. class NewsSpecifiedSpider(Spider): start_urls = []", "import Request logger = logging.getLogger(__name__) # This spider is a base for those", "url in self.start_urls: yield Request(url, dont_filter=True) with open(startfn, 'r') as f: urls =", "and parse a specified # list of URLs rather than using an RSS", "list of URLs rather than using an RSS feed or a sitemap. It", "class NewsSpecifiedSpider(Spider): start_urls = [] def start_requests(self): if self.crawler.settings.get('REFETCHCONTROL_ENABLED') == True: logger.warning('RefetchControl is", "will give spurious ' 'warnings. Try setting REFETCHCONTROL_ENABLED to ' 'False in settings.py.')", "read from SPECIFIED_URL_FILE: {urls}\") for url in urls: if url != '': yield", "Try setting REFETCHCONTROL_ENABLED to ' 'False in settings.py.') startfn = self.crawler.settings.get('SPECIFIED_URLS_FILE') if not", "'False in settings.py.') startfn = self.crawler.settings.get('SPECIFIED_URLS_FILE') if not startfn: logger.critical(\"SPECIFIED_URLS_FILE must be configured", "True: logger.warning('RefetchControl is incompatible with ' 'NewsSpecifiedSpider and will give spurious ' 'warnings.", "' 'warnings. 
Try setting REFETCHCONTROL_ENABLED to ' 'False in settings.py.') startfn = self.crawler.settings.get('SPECIFIED_URLS_FILE')", "base for those attempting to crawl and parse a specified # list of", "from SPECIFIED_URL_FILE: {urls}\") for url in urls: if url != '': yield Request(url,", "to point to a file containing a \" \"list of URLs.\") return for", "an RSS feed or a sitemap. It needs the # SPECIFIED_URIS_FILE setting set", "It needs the # SPECIFIED_URIS_FILE setting set up to point to a file", "url != '': yield Request(url, dont_filter=True) def parse(self, response): return self.parse_page(response) def parse_page(self,", "NewsSpecifiedSpider(Spider): start_urls = [] def start_requests(self): if self.crawler.settings.get('REFETCHCONTROL_ENABLED') == True: logger.warning('RefetchControl is incompatible", "to a file containing a \" \"list of URLs.\") return for url in", "for u in f.readlines()] logger.debug(f\"URLs read from SPECIFIED_URL_FILE: {urls}\") for url in urls:", "as f: urls = [u.strip() for u in f.readlines()] logger.debug(f\"URLs read from SPECIFIED_URL_FILE:", "of URLs.\") return for url in self.start_urls: yield Request(url, dont_filter=True) with open(startfn, 'r')", "# list of URLs rather than using an RSS feed or a sitemap.", "(e.g. in \" \"settings.py) to point to a file containing a \" \"list", "parse a specified # list of URLs rather than using an RSS feed", "containing a \" \"list of URLs.\") return for url in self.start_urls: yield Request(url,", "or a sitemap. It needs the # SPECIFIED_URIS_FILE setting set up to point", "logger.warning('RefetchControl is incompatible with ' 'NewsSpecifiedSpider and will give spurious ' 'warnings. Try", "dont_filter=True) with open(startfn, 'r') as f: urls = [u.strip() for u in f.readlines()]", "startfn: logger.critical(\"SPECIFIED_URLS_FILE must be configured (e.g. 
in \" \"settings.py) to point to a", "self.crawler.settings.get('SPECIFIED_URLS_FILE') if not startfn: logger.critical(\"SPECIFIED_URLS_FILE must be configured (e.g. in \" \"settings.py) to", "file with a list of URLs. class NewsSpecifiedSpider(Spider): start_urls = [] def start_requests(self):", "of URLs rather than using an RSS feed or a sitemap. It needs", "the # SPECIFIED_URIS_FILE setting set up to point to a file with a", "rather than using an RSS feed or a sitemap. It needs the #", "logger.critical(\"SPECIFIED_URLS_FILE must be configured (e.g. in \" \"settings.py) to point to a file", "from scrapy.http import Request logger = logging.getLogger(__name__) # This spider is a base", "incompatible with ' 'NewsSpecifiedSpider and will give spurious ' 'warnings. Try setting REFETCHCONTROL_ENABLED", "and will give spurious ' 'warnings. Try setting REFETCHCONTROL_ENABLED to ' 'False in", "# This spider is a base for those attempting to crawl and parse", "# SPECIFIED_URIS_FILE setting set up to point to a file with a list", "settings.py.') startfn = self.crawler.settings.get('SPECIFIED_URLS_FILE') if not startfn: logger.critical(\"SPECIFIED_URLS_FILE must be configured (e.g. in", "using an RSS feed or a sitemap. It needs the # SPECIFIED_URIS_FILE setting", "must be configured (e.g. in \" \"settings.py) to point to a file containing", "needs the # SPECIFIED_URIS_FILE setting set up to point to a file with", "= self.crawler.settings.get('SPECIFIED_URLS_FILE') if not startfn: logger.critical(\"SPECIFIED_URLS_FILE must be configured (e.g. 
in \" \"settings.py)", "scrapy.http import Request logger = logging.getLogger(__name__) # This spider is a base for", "is a base for those attempting to crawl and parse a specified #", "This spider is a base for those attempting to crawl and parse a", "def start_requests(self): if self.crawler.settings.get('REFETCHCONTROL_ENABLED') == True: logger.warning('RefetchControl is incompatible with ' 'NewsSpecifiedSpider and", "logger = logging.getLogger(__name__) # This spider is a base for those attempting to", "Request logger = logging.getLogger(__name__) # This spider is a base for those attempting", "return for url in self.start_urls: yield Request(url, dont_filter=True) with open(startfn, 'r') as f:", "to crawl and parse a specified # list of URLs rather than using", "utf-8 -*- import logging from scrapy.spiders import Spider from scrapy.http import Request logger", "in \" \"settings.py) to point to a file containing a \" \"list of", "yield Request(url, dont_filter=True) with open(startfn, 'r') as f: urls = [u.strip() for u", "'NewsSpecifiedSpider and will give spurious ' 'warnings. Try setting REFETCHCONTROL_ENABLED to ' 'False", "of URLs. class NewsSpecifiedSpider(Spider): start_urls = [] def start_requests(self): if self.crawler.settings.get('REFETCHCONTROL_ENABLED') == True:", "in urls: if url != '': yield Request(url, dont_filter=True) def parse(self, response): return", "== True: logger.warning('RefetchControl is incompatible with ' 'NewsSpecifiedSpider and will give spurious '", "feed or a sitemap. It needs the # SPECIFIED_URIS_FILE setting set up to" ]
[ "#6 on a list of all English words in the dictionary. See this", "search of \", n, \" words = \", round(cumulative_time,5), \" seconds\") print(\"Average search", "\"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"] assert (binary_search_word(mylist, \"monDay\")==True) assert (binary_search_word(mylist, \"Funday\")==False) print(\"Binary", "\"Sunday\"] assert (binary_search_word(mylist, \"monDay\")==True) assert (binary_search_word(mylist, \"Funday\")==False) print(\"Binary search test passed\") #%% testing", "e.g., [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]. The search should be case", "all English words in the dictionary. See this for a tip on how", "remove when not debugging #if(size): # print (\"While searching for word: \", word,", "# -*- coding: utf-8 -*- \"\"\" Practical Algorthns Problem set: Unit 5, 1.1", "search should be case in-sensitive. 7. Now, test your binary search algorithm from", "\", list1[0], \" of size \", size) #base case if size == 0:", "return False #item found if(list1[mid].lower()==word.lower()): return True #recursive call if(list1[mid].lower() < word.lower()): return", "#%% sequential search def binary_search_word(list1, word): \"\"\" Carry out a binary search of", "Note the time taken to search for a word. Compare it with your", "if(list1[mid].lower() < word.lower()): return binary_search_word(list1[mid+1:size], word) else: return binary_search_word(list1[0:mid], word) #%% test binary", "small list of words, e.g., [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]. The", "\"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"] assert (binary_search_word(mylist, \"monDay\")==True) assert (binary_search_word(mylist, \"Funday\")==False) print(\"Binary search test", "7. 
Now, test your binary search algorithm from #6 on a list of", "\"aaaaaaaa\"] n = len(list_of_words_to_search) results = {} cumulative_time = 0 for word in", "4. Modify your binary search algorithm (from #3) to work with words rather", "result from #5, and comment on your findings. https://www.datasciencebytes.com/bytes/2014/11/03/get-a-list-of-all-english-words-in-python/ \"\"\" #%% sequential search", "testing sequential search on list of dictionary words from nltk.corpus import words word_list", "be searched Returns ------- True/False \"\"\" size = len(list1) mid = size //", "Modify your binary search algorithm (from #3) to work with words rather than", "in results.items(): print(k, v) print(\"\\nTotal time to carry out search of \", n,", "# print (\"While searching for word: \", word, \", binary search called on", "#item found if(list1[mid].lower()==word.lower()): return True #recursive call if(list1[mid].lower() < word.lower()): return binary_search_word(list1[mid+1:size], word)", "carry out search of \", n, \" words = \", round(cumulative_time,5), \" seconds\")", "sorted list for a given word Parameters ---------- list1: input list, sorted word:", "list**\") print(\"Search for these words: \", list_of_words_to_search) print(\"\\nTime taken to search various words", "be case in-sensitive. 7. 
Now, test your binary search algorithm from #6 on", "binary_search_word(word_list, word) end = timer() time_taken = end-start results[word] = (round(time_taken,5), found) cumulative_time", "0: return False #item found if(list1[mid].lower()==word.lower()): return True #recursive call if(list1[mid].lower() < word.lower()):", "import words word_list = words.words() # prints 236736 print (len(word_list)) from timeit import", "at : \", list1[0], \" of size \", size) #base case if size", "#debug message, remove when not debugging #if(size): # print (\"While searching for word:", "(len(word_list)) from timeit import default_timer as timer list_of_words_to_search = [\"yesterday\", \"omuamua\", \"waqar\", \"different\",", "on a small list of words, e.g., [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\",", "word) else: return binary_search_word(list1[0:mid], word) #%% test binary search mylist = [\"Monday\", \"Tuesday\",", "\"monDay\")==True) assert (binary_search_word(mylist, \"Funday\")==False) print(\"Binary search test passed\") #%% testing sequential search on", "found = binary_search_word(word_list, word) end = timer() time_taken = end-start results[word] = (round(time_taken,5),", "tip on how to get a list of all dictionary words. Note the", "search test passed\") #%% testing sequential search on list of dictionary words from", "case if size == 0: return False #item found if(list1[mid].lower()==word.lower()): return True #recursive", "words rather than integers. Test it on a small list of words, e.g.,", "print(\"\\nTime taken to search various words and the result:\") for k,v in results.items():", "the result:\") for k,v in results.items(): print(k, v) print(\"\\nTotal time to carry out", "found) cumulative_time += time_taken print(\"\\n** Binary Search of word list**\") print(\"Search for these", "a tip on how to get a list of all dictionary words. 
Note", "end-start results[word] = (round(time_taken,5), found) cumulative_time += time_taken print(\"\\n** Binary Search of word", "passed\") #%% testing sequential search on list of dictionary words from nltk.corpus import", "for these words: \", list_of_words_to_search) print(\"\\nTime taken to search various words and the", "search algorithm from #6 on a list of all English words in the", "search of the given sorted list for a given word Parameters ---------- list1:", "dictionary words. Note the time taken to search for a word. Compare it", "print (len(word_list)) from timeit import default_timer as timer list_of_words_to_search = [\"yesterday\", \"omuamua\", \"waqar\",", "found if(list1[mid].lower()==word.lower()): return True #recursive call if(list1[mid].lower() < word.lower()): return binary_search_word(list1[mid+1:size], word) else:", "= [\"yesterday\", \"omuamua\", \"waqar\", \"different\", \"obtuse\", \"zoo\", \"aardvark\", \"style\", \"zaazoozum\", \"aaaaaaaa\"] n =", "to get a list of all dictionary words. Note the time taken to", "for word in list_of_words_to_search: start = timer() found = binary_search_word(word_list, word) end =", "message, remove when not debugging #if(size): # print (\"While searching for word: \",", "print(k, v) print(\"\\nTotal time to carry out search of \", n, \" words", "len(list_of_words_to_search) results = {} cumulative_time = 0 for word in list_of_words_to_search: start =", "in-sensitive. 7. 
Now, test your binary search algorithm from #6 on a list", "called on this list starting at : \", list1[0], \" of size \",", "for word: \", word, \", binary search called on this list starting at", "= timer() time_taken = end-start results[word] = (round(time_taken,5), found) cumulative_time += time_taken print(\"\\n**", "your binary search algorithm (from #3) to work with words rather than integers.", "start = timer() found = binary_search_word(word_list, word) end = timer() time_taken = end-start", "[\"yesterday\", \"omuamua\", \"waqar\", \"different\", \"obtuse\", \"zoo\", \"aardvark\", \"style\", \"zaazoozum\", \"aaaaaaaa\"] n = len(list_of_words_to_search)", "time to carry out search of \", n, \" words = \", round(cumulative_time,5),", "the given sorted list for a given word Parameters ---------- list1: input list,", "\", round(cumulative_time,5), \" seconds\") print(\"Average search time per word = \", round(cumulative_time/n,5), \"", "Unit 5, 1.1 Problem statement: 4. Modify your binary search algorithm (from #3)", "these words: \", list_of_words_to_search) print(\"\\nTime taken to search various words and the result:\")", "\"aardvark\", \"style\", \"zaazoozum\", \"aaaaaaaa\"] n = len(list_of_words_to_search) results = {} cumulative_time = 0", "word) end = timer() time_taken = end-start results[word] = (round(time_taken,5), found) cumulative_time +=", "\"obtuse\", \"zoo\", \"aardvark\", \"style\", \"zaazoozum\", \"aaaaaaaa\"] n = len(list_of_words_to_search) results = {} cumulative_time", "word: the value to be searched Returns ------- True/False \"\"\" size = len(list1)", "Problem set: Unit 5, 1.1 Problem statement: 4. Modify your binary search algorithm", "print(\"\\nTotal time to carry out search of \", n, \" words = \",", ": \", list1[0], \" of size \", size) #base case if size ==", "from #5, and comment on your findings. 
https://www.datasciencebytes.com/bytes/2014/11/03/get-a-list-of-all-english-words-in-python/ \"\"\" #%% sequential search def", "of words, e.g., [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]. The search should", "\"\"\" #%% sequential search def binary_search_word(list1, word): \"\"\" Carry out a binary search", "how to get a list of all dictionary words. Note the time taken", "search def binary_search_word(list1, word): \"\"\" Carry out a binary search of the given", "of size \", size) #base case if size == 0: return False #item", "assert (binary_search_word(mylist, \"Funday\")==False) print(\"Binary search test passed\") #%% testing sequential search on list", "word list**\") print(\"Search for these words: \", list_of_words_to_search) print(\"\\nTime taken to search various", "= \", round(cumulative_time,5), \" seconds\") print(\"Average search time per word = \", round(cumulative_time/n,5),", "search for a word. Compare it with your timing result from #5, and", "+= time_taken print(\"\\n** Binary Search of word list**\") print(\"Search for these words: \",", "word_list = words.words() # prints 236736 print (len(word_list)) from timeit import default_timer as", "for a given word Parameters ---------- list1: input list, sorted word: the value", "your timing result from #5, and comment on your findings. 
https://www.datasciencebytes.com/bytes/2014/11/03/get-a-list-of-all-english-words-in-python/ \"\"\" #%%", "size == 0: return False #item found if(list1[mid].lower()==word.lower()): return True #recursive call if(list1[mid].lower()", "this for a tip on how to get a list of all dictionary", "0 for word in list_of_words_to_search: start = timer() found = binary_search_word(word_list, word) end", "= len(list1) mid = size // 2 #debug message, remove when not debugging", "list, sorted word: the value to be searched Returns ------- True/False \"\"\" size", "\"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"] assert (binary_search_word(mylist, \"monDay\")==True) assert (binary_search_word(mylist, \"Funday\")==False) print(\"Binary search", "[\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"] assert (binary_search_word(mylist, \"monDay\")==True) assert (binary_search_word(mylist, \"Funday\")==False)", "list for a given word Parameters ---------- list1: input list, sorted word: the", "-*- coding: utf-8 -*- \"\"\" Practical Algorthns Problem set: Unit 5, 1.1 Problem", "binary_search_word(list1[mid+1:size], word) else: return binary_search_word(list1[0:mid], word) #%% test binary search mylist = [\"Monday\",", "results = {} cumulative_time = 0 for word in list_of_words_to_search: start = timer()", "the dictionary. See this for a tip on how to get a list", "end = timer() time_taken = end-start results[word] = (round(time_taken,5), found) cumulative_time += time_taken", "\"omuamua\", \"waqar\", \"different\", \"obtuse\", \"zoo\", \"aardvark\", \"style\", \"zaazoozum\", \"aaaaaaaa\"] n = len(list_of_words_to_search) results", "words = \", round(cumulative_time,5), \" seconds\") print(\"Average search time per word = \",", "1.1 Problem statement: 4. 
Modify your binary search algorithm (from #3) to work", "prints 236736 print (len(word_list)) from timeit import default_timer as timer list_of_words_to_search = [\"yesterday\",", "a list of all English words in the dictionary. See this for a", "timer() found = binary_search_word(word_list, word) end = timer() time_taken = end-start results[word] =", "should be case in-sensitive. 7. Now, test your binary search algorithm from #6", "print(\"\\n** Binary Search of word list**\") print(\"Search for these words: \", list_of_words_to_search) print(\"\\nTime", "search various words and the result:\") for k,v in results.items(): print(k, v) print(\"\\nTotal", "\", word, \", binary search called on this list starting at : \",", "input list, sorted word: the value to be searched Returns ------- True/False \"\"\"", "to work with words rather than integers. Test it on a small list", "words from nltk.corpus import words word_list = words.words() # prints 236736 print (len(word_list))", "The search should be case in-sensitive. 7. Now, test your binary search algorithm", "word. Compare it with your timing result from #5, and comment on your", "and the result:\") for k,v in results.items(): print(k, v) print(\"\\nTotal time to carry", "for a word. Compare it with your timing result from #5, and comment", "= words.words() # prints 236736 print (len(word_list)) from timeit import default_timer as timer", "and comment on your findings. https://www.datasciencebytes.com/bytes/2014/11/03/get-a-list-of-all-english-words-in-python/ \"\"\" #%% sequential search def binary_search_word(list1, word):", "# prints 236736 print (len(word_list)) from timeit import default_timer as timer list_of_words_to_search =", "\"Saturday\", \"Sunday\"]. The search should be case in-sensitive. 7. 
Now, test your binary", "round(cumulative_time,5), \" seconds\") print(\"Average search time per word = \", round(cumulative_time/n,5), \" seconds\")", "2 #debug message, remove when not debugging #if(size): # print (\"While searching for", "mid = size // 2 #debug message, remove when not debugging #if(size): #", "#if(size): # print (\"While searching for word: \", word, \", binary search called", "#base case if size == 0: return False #item found if(list1[mid].lower()==word.lower()): return True", "various words and the result:\") for k,v in results.items(): print(k, v) print(\"\\nTotal time", "Binary Search of word list**\") print(\"Search for these words: \", list_of_words_to_search) print(\"\\nTime taken", "comment on your findings. https://www.datasciencebytes.com/bytes/2014/11/03/get-a-list-of-all-english-words-in-python/ \"\"\" #%% sequential search def binary_search_word(list1, word): \"\"\"", "\" words = \", round(cumulative_time,5), \" seconds\") print(\"Average search time per word =", "search algorithm (from #3) to work with words rather than integers. Test it", "findings. https://www.datasciencebytes.com/bytes/2014/11/03/get-a-list-of-all-english-words-in-python/ \"\"\" #%% sequential search def binary_search_word(list1, word): \"\"\" Carry out a", "test binary search mylist = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"] assert", "Returns ------- True/False \"\"\" size = len(list1) mid = size // 2 #debug", "list_of_words_to_search: start = timer() found = binary_search_word(word_list, word) end = timer() time_taken =", "words. Note the time taken to search for a word. Compare it with", "word: \", word, \", binary search called on this list starting at :", "to carry out search of \", n, \" words = \", round(cumulative_time,5), \"", "list_of_words_to_search) print(\"\\nTime taken to search various words and the result:\") for k,v in", "5, 1.1 Problem statement: 4. 
Modify your binary search algorithm (from #3) to", "when not debugging #if(size): # print (\"While searching for word: \", word, \",", "size \", size) #base case if size == 0: return False #item found", "return binary_search_word(list1[mid+1:size], word) else: return binary_search_word(list1[0:mid], word) #%% test binary search mylist =", "#%% test binary search mylist = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]", "binary search algorithm from #6 on a list of all English words in", "(from #3) to work with words rather than integers. Test it on a", "// 2 #debug message, remove when not debugging #if(size): # print (\"While searching", "words in the dictionary. See this for a tip on how to get", "n = len(list_of_words_to_search) results = {} cumulative_time = 0 for word in list_of_words_to_search:", "to be searched Returns ------- True/False \"\"\" size = len(list1) mid = size", "-*- \"\"\" Practical Algorthns Problem set: Unit 5, 1.1 Problem statement: 4. Modify", "Parameters ---------- list1: input list, sorted word: the value to be searched Returns", "[\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]. The search should be case in-sensitive.", "#%% testing sequential search on list of dictionary words from nltk.corpus import words", "< word.lower()): return binary_search_word(list1[mid+1:size], word) else: return binary_search_word(list1[0:mid], word) #%% test binary search", "English words in the dictionary. 
See this for a tip on how to", "as timer list_of_words_to_search = [\"yesterday\", \"omuamua\", \"waqar\", \"different\", \"obtuse\", \"zoo\", \"aardvark\", \"style\", \"zaazoozum\",", "https://www.datasciencebytes.com/bytes/2014/11/03/get-a-list-of-all-english-words-in-python/ \"\"\" #%% sequential search def binary_search_word(list1, word): \"\"\" Carry out a binary", "time_taken print(\"\\n** Binary Search of word list**\") print(\"Search for these words: \", list_of_words_to_search)", "= [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"] assert (binary_search_word(mylist, \"monDay\")==True) assert (binary_search_word(mylist,", "= {} cumulative_time = 0 for word in list_of_words_to_search: start = timer() found", "\", binary search called on this list starting at : \", list1[0], \"", "binary search algorithm (from #3) to work with words rather than integers. Test", "algorithm (from #3) to work with words rather than integers. Test it on", "\"different\", \"obtuse\", \"zoo\", \"aardvark\", \"style\", \"zaazoozum\", \"aaaaaaaa\"] n = len(list_of_words_to_search) results = {}", "work with words rather than integers. Test it on a small list of", "taken to search various words and the result:\") for k,v in results.items(): print(k,", "= timer() found = binary_search_word(word_list, word) end = timer() time_taken = end-start results[word]", "See this for a tip on how to get a list of all", "default_timer as timer list_of_words_to_search = [\"yesterday\", \"omuamua\", \"waqar\", \"different\", \"obtuse\", \"zoo\", \"aardvark\", \"style\",", "\"\"\" Practical Algorthns Problem set: Unit 5, 1.1 Problem statement: 4. 
Modify your", "else: return binary_search_word(list1[0:mid], word) #%% test binary search mylist = [\"Monday\", \"Tuesday\", \"Wednesday\",", "timer() time_taken = end-start results[word] = (round(time_taken,5), found) cumulative_time += time_taken print(\"\\n** Binary", "starting at : \", list1[0], \" of size \", size) #base case if", "------- True/False \"\"\" size = len(list1) mid = size // 2 #debug message,", "taken to search for a word. Compare it with your timing result from", "list of dictionary words from nltk.corpus import words word_list = words.words() # prints", "list starting at : \", list1[0], \" of size \", size) #base case", "print(\"Search for these words: \", list_of_words_to_search) print(\"\\nTime taken to search various words and", "searched Returns ------- True/False \"\"\" size = len(list1) mid = size // 2", "of all dictionary words. Note the time taken to search for a word.", "result:\") for k,v in results.items(): print(k, v) print(\"\\nTotal time to carry out search", "than integers. Test it on a small list of words, e.g., [\"Monday\", \"Tuesday\",", "\"Saturday\", \"Sunday\"] assert (binary_search_word(mylist, \"monDay\")==True) assert (binary_search_word(mylist, \"Funday\")==False) print(\"Binary search test passed\") #%%", "\", n, \" words = \", round(cumulative_time,5), \" seconds\") print(\"Average search time per", "timer list_of_words_to_search = [\"yesterday\", \"omuamua\", \"waqar\", \"different\", \"obtuse\", \"zoo\", \"aardvark\", \"style\", \"zaazoozum\", \"aaaaaaaa\"]", "\"Sunday\"]. The search should be case in-sensitive. 7. Now, test your binary search", "= len(list_of_words_to_search) results = {} cumulative_time = 0 for word in list_of_words_to_search: start", "\"zoo\", \"aardvark\", \"style\", \"zaazoozum\", \"aaaaaaaa\"] n = len(list_of_words_to_search) results = {} cumulative_time =", "\"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]. The search should be case in-sensitive. 
7.", "v) print(\"\\nTotal time to carry out search of \", n, \" words =", "size) #base case if size == 0: return False #item found if(list1[mid].lower()==word.lower()): return", "word in list_of_words_to_search: start = timer() found = binary_search_word(word_list, word) end = timer()", "to search for a word. Compare it with your timing result from #5,", "a small list of words, e.g., [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"].", "of all English words in the dictionary. See this for a tip on", "utf-8 -*- \"\"\" Practical Algorthns Problem set: Unit 5, 1.1 Problem statement: 4.", "case in-sensitive. 7. Now, test your binary search algorithm from #6 on a", "print (\"While searching for word: \", word, \", binary search called on this", "for a tip on how to get a list of all dictionary words.", "statement: 4. Modify your binary search algorithm (from #3) to work with words", "list_of_words_to_search = [\"yesterday\", \"omuamua\", \"waqar\", \"different\", \"obtuse\", \"zoo\", \"aardvark\", \"style\", \"zaazoozum\", \"aaaaaaaa\"] n", "a given word Parameters ---------- list1: input list, sorted word: the value to", "n, \" words = \", round(cumulative_time,5), \" seconds\") print(\"Average search time per word", "from #6 on a list of all English words in the dictionary. 
See", "= (round(time_taken,5), found) cumulative_time += time_taken print(\"\\n** Binary Search of word list**\") print(\"Search", "coding: utf-8 -*- \"\"\" Practical Algorthns Problem set: Unit 5, 1.1 Problem statement:", "search called on this list starting at : \", list1[0], \" of size", "debugging #if(size): # print (\"While searching for word: \", word, \", binary search", "True/False \"\"\" size = len(list1) mid = size // 2 #debug message, remove", "if(list1[mid].lower()==word.lower()): return True #recursive call if(list1[mid].lower() < word.lower()): return binary_search_word(list1[mid+1:size], word) else: return", "assert (binary_search_word(mylist, \"monDay\")==True) assert (binary_search_word(mylist, \"Funday\")==False) print(\"Binary search test passed\") #%% testing sequential", "on your findings. https://www.datasciencebytes.com/bytes/2014/11/03/get-a-list-of-all-english-words-in-python/ \"\"\" #%% sequential search def binary_search_word(list1, word): \"\"\" Carry", "from nltk.corpus import words word_list = words.words() # prints 236736 print (len(word_list)) from", "---------- list1: input list, sorted word: the value to be searched Returns -------", "results[word] = (round(time_taken,5), found) cumulative_time += time_taken print(\"\\n** Binary Search of word list**\")", "Test it on a small list of words, e.g., [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\",", "\"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]. The search should be case in-sensitive. 7. Now,", "get a list of all dictionary words. Note the time taken to search", "import default_timer as timer list_of_words_to_search = [\"yesterday\", \"omuamua\", \"waqar\", \"different\", \"obtuse\", \"zoo\", \"aardvark\",", "{} cumulative_time = 0 for word in list_of_words_to_search: start = timer() found =", "list of words, e.g., [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]. The search", "#5, and comment on your findings. 
https://www.datasciencebytes.com/bytes/2014/11/03/get-a-list-of-all-english-words-in-python/ \"\"\" #%% sequential search def binary_search_word(list1,", "#3) to work with words rather than integers. Test it on a small", "(round(time_taken,5), found) cumulative_time += time_taken print(\"\\n** Binary Search of word list**\") print(\"Search for", "dictionary. See this for a tip on how to get a list of", "sequential search on list of dictionary words from nltk.corpus import words word_list =", "a binary search of the given sorted list for a given word Parameters", "with your timing result from #5, and comment on your findings. https://www.datasciencebytes.com/bytes/2014/11/03/get-a-list-of-all-english-words-in-python/ \"\"\"", "\"\"\" Carry out a binary search of the given sorted list for a", "word, \", binary search called on this list starting at : \", list1[0],", "if size == 0: return False #item found if(list1[mid].lower()==word.lower()): return True #recursive call", "the value to be searched Returns ------- True/False \"\"\" size = len(list1) mid", "\"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]. The search should be case in-sensitive. 7. Now, test", "cumulative_time += time_taken print(\"\\n** Binary Search of word list**\") print(\"Search for these words:", "on list of dictionary words from nltk.corpus import words word_list = words.words() #", "list1: input list, sorted word: the value to be searched Returns ------- True/False", "binary search of the given sorted list for a given word Parameters ----------", "Algorthns Problem set: Unit 5, 1.1 Problem statement: 4. 
Modify your binary search", "of word list**\") print(\"Search for these words: \", list_of_words_to_search) print(\"\\nTime taken to search", "= end-start results[word] = (round(time_taken,5), found) cumulative_time += time_taken print(\"\\n** Binary Search of", "\"Friday\", \"Saturday\", \"Sunday\"] assert (binary_search_word(mylist, \"monDay\")==True) assert (binary_search_word(mylist, \"Funday\")==False) print(\"Binary search test passed\")", "searching for word: \", word, \", binary search called on this list starting", "on a list of all English words in the dictionary. See this for", "= size // 2 #debug message, remove when not debugging #if(size): # print", "to search various words and the result:\") for k,v in results.items(): print(k, v)", "binary search mylist = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"] assert (binary_search_word(mylist,", "Problem statement: 4. Modify your binary search algorithm (from #3) to work with", "of dictionary words from nltk.corpus import words word_list = words.words() # prints 236736", "\"Friday\", \"Saturday\", \"Sunday\"]. The search should be case in-sensitive. 7. 
Now, test your", "True #recursive call if(list1[mid].lower() < word.lower()): return binary_search_word(list1[mid+1:size], word) else: return binary_search_word(list1[0:mid], word)", "return True #recursive call if(list1[mid].lower() < word.lower()): return binary_search_word(list1[mid+1:size], word) else: return binary_search_word(list1[0:mid],", "(\"While searching for word: \", word, \", binary search called on this list", "mylist = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"] assert (binary_search_word(mylist, \"monDay\")==True) assert", "= 0 for word in list_of_words_to_search: start = timer() found = binary_search_word(word_list, word)", "this list starting at : \", list1[0], \" of size \", size) #base", "words word_list = words.words() # prints 236736 print (len(word_list)) from timeit import default_timer", "on this list starting at : \", list1[0], \" of size \", size)", "sorted word: the value to be searched Returns ------- True/False \"\"\" size =", "Compare it with your timing result from #5, and comment on your findings.", "list of all English words in the dictionary. See this for a tip", "words and the result:\") for k,v in results.items(): print(k, v) print(\"\\nTotal time to", "size = len(list1) mid = size // 2 #debug message, remove when not", "test passed\") #%% testing sequential search on list of dictionary words from nltk.corpus", "of \", n, \" words = \", round(cumulative_time,5), \" seconds\") print(\"Average search time", "k,v in results.items(): print(k, v) print(\"\\nTotal time to carry out search of \",", "Practical Algorthns Problem set: Unit 5, 1.1 Problem statement: 4. 
Modify your binary", "\"Funday\")==False) print(\"Binary search test passed\") #%% testing sequential search on list of dictionary", "Carry out a binary search of the given sorted list for a given", "False #item found if(list1[mid].lower()==word.lower()): return True #recursive call if(list1[mid].lower() < word.lower()): return binary_search_word(list1[mid+1:size],", "binary_search_word(list1[0:mid], word) #%% test binary search mylist = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\",", "a word. Compare it with your timing result from #5, and comment on", "search mylist = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"] assert (binary_search_word(mylist, \"monDay\")==True)", "#recursive call if(list1[mid].lower() < word.lower()): return binary_search_word(list1[mid+1:size], word) else: return binary_search_word(list1[0:mid], word) #%%", "\", list_of_words_to_search) print(\"\\nTime taken to search various words and the result:\") for k,v", "cumulative_time = 0 for word in list_of_words_to_search: start = timer() found = binary_search_word(word_list,", "sequential search def binary_search_word(list1, word): \"\"\" Carry out a binary search of the", "out a binary search of the given sorted list for a given word", "list1[0], \" of size \", size) #base case if size == 0: return", "\" of size \", size) #base case if size == 0: return False", "it with your timing result from #5, and comment on your findings. 
https://www.datasciencebytes.com/bytes/2014/11/03/get-a-list-of-all-english-words-in-python/", "binary_search_word(list1, word): \"\"\" Carry out a binary search of the given sorted list", "call if(list1[mid].lower() < word.lower()): return binary_search_word(list1[mid+1:size], word) else: return binary_search_word(list1[0:mid], word) #%% test", "binary search called on this list starting at : \", list1[0], \" of", "timeit import default_timer as timer list_of_words_to_search = [\"yesterday\", \"omuamua\", \"waqar\", \"different\", \"obtuse\", \"zoo\",", "print(\"Binary search test passed\") #%% testing sequential search on list of dictionary words", "size // 2 #debug message, remove when not debugging #if(size): # print (\"While", "your binary search algorithm from #6 on a list of all English words", "nltk.corpus import words word_list = words.words() # prints 236736 print (len(word_list)) from timeit", "words.words() # prints 236736 print (len(word_list)) from timeit import default_timer as timer list_of_words_to_search", "word): \"\"\" Carry out a binary search of the given sorted list for", "with words rather than integers. Test it on a small list of words,", "all dictionary words. Note the time taken to search for a word. Compare", "list of all dictionary words. 
Note the time taken to search for a", "given sorted list for a given word Parameters ---------- list1: input list, sorted", "= binary_search_word(word_list, word) end = timer() time_taken = end-start results[word] = (round(time_taken,5), found)", "for k,v in results.items(): print(k, v) print(\"\\nTotal time to carry out search of", "from timeit import default_timer as timer list_of_words_to_search = [\"yesterday\", \"omuamua\", \"waqar\", \"different\", \"obtuse\",", "out search of \", n, \" words = \", round(cumulative_time,5), \" seconds\") print(\"Average", "\"zaazoozum\", \"aaaaaaaa\"] n = len(list_of_words_to_search) results = {} cumulative_time = 0 for word", "set: Unit 5, 1.1 Problem statement: 4. Modify your binary search algorithm (from", "value to be searched Returns ------- True/False \"\"\" size = len(list1) mid =", "given word Parameters ---------- list1: input list, sorted word: the value to be", "236736 print (len(word_list)) from timeit import default_timer as timer list_of_words_to_search = [\"yesterday\", \"omuamua\",", "word.lower()): return binary_search_word(list1[mid+1:size], word) else: return binary_search_word(list1[0:mid], word) #%% test binary search mylist", "Now, test your binary search algorithm from #6 on a list of all", "== 0: return False #item found if(list1[mid].lower()==word.lower()): return True #recursive call if(list1[mid].lower() <", "time taken to search for a word. Compare it with your timing result", "integers. Test it on a small list of words, e.g., [\"Monday\", \"Tuesday\", \"Wednesday\",", "rather than integers. Test it on a small list of words, e.g., [\"Monday\",", "the time taken to search for a word. Compare it with your timing", "def binary_search_word(list1, word): \"\"\" Carry out a binary search of the given sorted", "results.items(): print(k, v) print(\"\\nTotal time to carry out search of \", n, \"", "words, e.g., [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]. 
The search should be", "word Parameters ---------- list1: input list, sorted word: the value to be searched", "(binary_search_word(mylist, \"Funday\")==False) print(\"Binary search test passed\") #%% testing sequential search on list of", "return binary_search_word(list1[0:mid], word) #%% test binary search mylist = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\",", "it on a small list of words, e.g., [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\",", "algorithm from #6 on a list of all English words in the dictionary.", "(binary_search_word(mylist, \"monDay\")==True) assert (binary_search_word(mylist, \"Funday\")==False) print(\"Binary search test passed\") #%% testing sequential search", "your findings. https://www.datasciencebytes.com/bytes/2014/11/03/get-a-list-of-all-english-words-in-python/ \"\"\" #%% sequential search def binary_search_word(list1, word): \"\"\" Carry out", "word) #%% test binary search mylist = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\",", "on how to get a list of all dictionary words. Note the time", "timing result from #5, and comment on your findings. https://www.datasciencebytes.com/bytes/2014/11/03/get-a-list-of-all-english-words-in-python/ \"\"\" #%% sequential", "in the dictionary. 
See this for a tip on how to get a", "of the given sorted list for a given word Parameters ---------- list1: input", "not debugging #if(size): # print (\"While searching for word: \", word, \", binary", "\", size) #base case if size == 0: return False #item found if(list1[mid].lower()==word.lower()):", "len(list1) mid = size // 2 #debug message, remove when not debugging #if(size):", "dictionary words from nltk.corpus import words word_list = words.words() # prints 236736 print", "search on list of dictionary words from nltk.corpus import words word_list = words.words()", "in list_of_words_to_search: start = timer() found = binary_search_word(word_list, word) end = timer() time_taken", "test your binary search algorithm from #6 on a list of all English", "Search of word list**\") print(\"Search for these words: \", list_of_words_to_search) print(\"\\nTime taken to", "\"style\", \"zaazoozum\", \"aaaaaaaa\"] n = len(list_of_words_to_search) results = {} cumulative_time = 0 for", "a list of all dictionary words. Note the time taken to search for", "\"\"\" size = len(list1) mid = size // 2 #debug message, remove when", "words: \", list_of_words_to_search) print(\"\\nTime taken to search various words and the result:\") for", "time_taken = end-start results[word] = (round(time_taken,5), found) cumulative_time += time_taken print(\"\\n** Binary Search", "\"waqar\", \"different\", \"obtuse\", \"zoo\", \"aardvark\", \"style\", \"zaazoozum\", \"aaaaaaaa\"] n = len(list_of_words_to_search) results =" ]
[ "= np.array([1, 2, 3]) b = np.array([4, 5, 6]) dot_product = np.dot(a, b)", "as np a = np.array([1, 2, 3]) b = np.array([4, 5, 6]) dot_product", "np a = np.array([1, 2, 3]) b = np.array([4, 5, 6]) dot_product =", "a = np.array([1, 2, 3]) b = np.array([4, 5, 6]) dot_product = np.dot(a,", "numpy as np a = np.array([1, 2, 3]) b = np.array([4, 5, 6])", "import numpy as np a = np.array([1, 2, 3]) b = np.array([4, 5,", "np.array([1, 2, 3]) b = np.array([4, 5, 6]) dot_product = np.dot(a, b) print(dot_product)" ]
[ "image. \"\"\" class Text: \"\"\"A simple class to define text extracted from an", "app_window ): # instance variables unique to each instance self.text = text self.app_window", "): # instance variables unique to each instance self.text = text self.app_window =", "def __init__( self, text, app_window ): # instance variables unique to each instance", "variables unique to each instance self.text = text self.app_window = app_window # handles", "replace = True # update text edit with newly extracted text self.app_window.text_output(text, replace)", "image\"\"\" def __init__( self, text, app_window ): # instance variables unique to each", "CST 205 Project: Group 16 Text.py 5/8/2020 This class is responsible for handling", "Project: Group 16 Text.py 5/8/2020 This class is responsible for handling text extracted", "<NAME>, <NAME>, <NAME>, <NAME> CST 205 Project: Group 16 Text.py 5/8/2020 This class", "Text.py 5/8/2020 This class is responsible for handling text extracted from image. \"\"\"", "<NAME>, <NAME>, <NAME> CST 205 Project: Group 16 Text.py 5/8/2020 This class is", "extracted from an image\"\"\" def __init__( self, text, app_window ): # instance variables", "\"\"\" <NAME>, <NAME>, <NAME>, <NAME>, <NAME> CST 205 Project: Group 16 Text.py 5/8/2020", "handling text extracted from image. \"\"\" class Text: \"\"\"A simple class to define", "# instance variables unique to each instance self.text = text self.app_window = app_window", "app_window # handles overlaying of text output replace = True # update text", "from image. 
\"\"\" class Text: \"\"\"A simple class to define text extracted from", "define text extracted from an image\"\"\" def __init__( self, text, app_window ): #", "__init__( self, text, app_window ): # instance variables unique to each instance self.text", "class to define text extracted from an image\"\"\" def __init__( self, text, app_window", "\"\"\" class Text: \"\"\"A simple class to define text extracted from an image\"\"\"", "to each instance self.text = text self.app_window = app_window # handles overlaying of", "\"\"\"A simple class to define text extracted from an image\"\"\" def __init__( self,", "text, app_window ): # instance variables unique to each instance self.text = text", "each instance self.text = text self.app_window = app_window # handles overlaying of text", "Text: \"\"\"A simple class to define text extracted from an image\"\"\" def __init__(", "<NAME>, <NAME>, <NAME>, <NAME>, <NAME> CST 205 Project: Group 16 Text.py 5/8/2020 This", "This class is responsible for handling text extracted from image. \"\"\" class Text:", "# handles overlaying of text output replace = True # update text edit", "an image\"\"\" def __init__( self, text, app_window ): # instance variables unique to", "instance self.text = text self.app_window = app_window # handles overlaying of text output", "of text output replace = True # update text edit with newly extracted", "to define text extracted from an image\"\"\" def __init__( self, text, app_window ):", "text extracted from an image\"\"\" def __init__( self, text, app_window ): # instance", "self.text = text self.app_window = app_window # handles overlaying of text output replace", "for handling text extracted from image. 
\"\"\" class Text: \"\"\"A simple class to", "output replace = True # update text edit with newly extracted text self.app_window.text_output(text,", "205 Project: Group 16 Text.py 5/8/2020 This class is responsible for handling text", "from an image\"\"\" def __init__( self, text, app_window ): # instance variables unique", "= text self.app_window = app_window # handles overlaying of text output replace =", "text self.app_window = app_window # handles overlaying of text output replace = True", "overlaying of text output replace = True # update text edit with newly", "<NAME>, <NAME> CST 205 Project: Group 16 Text.py 5/8/2020 This class is responsible", "<NAME> CST 205 Project: Group 16 Text.py 5/8/2020 This class is responsible for", "instance variables unique to each instance self.text = text self.app_window = app_window #", "unique to each instance self.text = text self.app_window = app_window # handles overlaying", "handles overlaying of text output replace = True # update text edit with", "extracted from image. \"\"\" class Text: \"\"\"A simple class to define text extracted", "is responsible for handling text extracted from image. \"\"\" class Text: \"\"\"A simple", "responsible for handling text extracted from image. \"\"\" class Text: \"\"\"A simple class", "self.app_window = app_window # handles overlaying of text output replace = True #", "simple class to define text extracted from an image\"\"\" def __init__( self, text,", "Group 16 Text.py 5/8/2020 This class is responsible for handling text extracted from", "class Text: \"\"\"A simple class to define text extracted from an image\"\"\" def", "= app_window # handles overlaying of text output replace = True # update", "self, text, app_window ): # instance variables unique to each instance self.text =", "text output replace = True # update text edit with newly extracted text", "16 Text.py 5/8/2020 This class is responsible for handling text extracted from image.", "text extracted from image. 
\"\"\" class Text: \"\"\"A simple class to define text", "5/8/2020 This class is responsible for handling text extracted from image. \"\"\" class", "class is responsible for handling text extracted from image. \"\"\" class Text: \"\"\"A" ]
[ "'+ str(a) + ' and ' + str(b)) ## --- if cmd ==", "+ str(b)) ## --- if cmd == 'add': foo = ml.add(a,b) print('the sum", "as ml ## --- input parameters ---(from command line or shell script) cmd", "math_lib as ml ## --- input parameters ---(from command line or shell script)", "str(a) + ' and ' + str(b)) ## --- if cmd == 'add':", "= ml.add(a,b) print('the sum is: '+ str(foo)) if cmd == 'div': foo =", "for Scientists # Description: Intro to using GitHub Classroom. Practice with creating and", "script) cmd = sys.argv[1] #command (can be 'add' or 'div') a = float(sys.argv[2])", "a = float(sys.argv[2]) #numerical b = float(sys.argv[3]) #numerical ## print statements: print('input values", "'add': foo = ml.add(a,b) print('the sum is: '+ str(foo)) if cmd == 'div':", "This file imports funcitons div and add from math_lib.py import sys import math_lib", "are '+ str(a) + ' and ' + str(b)) ## --- if cmd", "foo = ml.add(a,b) print('the sum is: '+ str(foo)) if cmd == 'div': foo", "#numerical ## print statements: print('input values are '+ str(a) + ' and '", "== 'add': foo = ml.add(a,b) print('the sum is: '+ str(foo)) if cmd ==", "values are '+ str(a) + ' and ' + str(b)) ## --- if", "shell script) cmd = sys.argv[1] #command (can be 'add' or 'div') a =", "of class MCDB6440: Software Engineering for Scientists # Description: Intro to using GitHub", "#numerical b = float(sys.argv[3]) #numerical ## print statements: print('input values are '+ str(a)", "and uploading files to github etc. # This file imports funcitons div and", "github etc. # This file imports funcitons div and add from math_lib.py import", "# School assignment: For Assignment #0 of class MCDB6440: Software Engineering for Scientists", "assignment: For Assignment #0 of class MCDB6440: Software Engineering for Scientists # Description:", "Practice with creating and uploading files to github etc. 
# This file imports", "File: calculate.py # Initial date: 4 Sept 2020 # Author: <NAME> # School", "float(sys.argv[2]) #numerical b = float(sys.argv[3]) #numerical ## print statements: print('input values are '+", "Description: Intro to using GitHub Classroom. Practice with creating and uploading files to", "# This file imports funcitons div and add from math_lib.py import sys import", "Scientists # Description: Intro to using GitHub Classroom. Practice with creating and uploading", "add from math_lib.py import sys import math_lib as ml ## --- input parameters", "2020 # Author: <NAME> # School assignment: For Assignment #0 of class MCDB6440:", "Classroom. Practice with creating and uploading files to github etc. # This file", "print statements: print('input values are '+ str(a) + ' and ' + str(b))", "ml ## --- input parameters ---(from command line or shell script) cmd =", "For Assignment #0 of class MCDB6440: Software Engineering for Scientists # Description: Intro", "#!/usr/bin/env python # File: calculate.py # Initial date: 4 Sept 2020 # Author:", "input parameters ---(from command line or shell script) cmd = sys.argv[1] #command (can", "## --- if cmd == 'add': foo = ml.add(a,b) print('the sum is: '+", "be 'add' or 'div') a = float(sys.argv[2]) #numerical b = float(sys.argv[3]) #numerical ##", "ml.add(a,b) print('the sum is: '+ str(foo)) if cmd == 'div': foo = ml.div(a,b)", "print('input values are '+ str(a) + ' and ' + str(b)) ## ---", "#0 of class MCDB6440: Software Engineering for Scientists # Description: Intro to using", "Software Engineering for Scientists # Description: Intro to using GitHub Classroom. Practice with", "and ' + str(b)) ## --- if cmd == 'add': foo = ml.add(a,b)", "4 Sept 2020 # Author: <NAME> # School assignment: For Assignment #0 of", "files to github etc. 
# This file imports funcitons div and add from", "date: 4 Sept 2020 # Author: <NAME> # School assignment: For Assignment #0", "class MCDB6440: Software Engineering for Scientists # Description: Intro to using GitHub Classroom.", "file imports funcitons div and add from math_lib.py import sys import math_lib as", "b = float(sys.argv[3]) #numerical ## print statements: print('input values are '+ str(a) +", "Author: <NAME> # School assignment: For Assignment #0 of class MCDB6440: Software Engineering", "line or shell script) cmd = sys.argv[1] #command (can be 'add' or 'div')", "Intro to using GitHub Classroom. Practice with creating and uploading files to github", "GitHub Classroom. Practice with creating and uploading files to github etc. # This", "sys import math_lib as ml ## --- input parameters ---(from command line or", "is: '+ str(foo)) if cmd == 'div': foo = ml.div(a,b) print('first/second is: '+", "parameters ---(from command line or shell script) cmd = sys.argv[1] #command (can be", "# Author: <NAME> # School assignment: For Assignment #0 of class MCDB6440: Software", "---(from command line or shell script) cmd = sys.argv[1] #command (can be 'add'", "Assignment #0 of class MCDB6440: Software Engineering for Scientists # Description: Intro to", "cmd == 'add': foo = ml.add(a,b) print('the sum is: '+ str(foo)) if cmd", "cmd = sys.argv[1] #command (can be 'add' or 'div') a = float(sys.argv[2]) #numerical", "to github etc. # This file imports funcitons div and add from math_lib.py", "python # File: calculate.py # Initial date: 4 Sept 2020 # Author: <NAME>", "#command (can be 'add' or 'div') a = float(sys.argv[2]) #numerical b = float(sys.argv[3])", "MCDB6440: Software Engineering for Scientists # Description: Intro to using GitHub Classroom. Practice", "# Initial date: 4 Sept 2020 # Author: <NAME> # School assignment: For", "using GitHub Classroom. Practice with creating and uploading files to github etc. 
#", "float(sys.argv[3]) #numerical ## print statements: print('input values are '+ str(a) + ' and", "Initial date: 4 Sept 2020 # Author: <NAME> # School assignment: For Assignment", "div and add from math_lib.py import sys import math_lib as ml ## ---", "## --- input parameters ---(from command line or shell script) cmd = sys.argv[1]", "= sys.argv[1] #command (can be 'add' or 'div') a = float(sys.argv[2]) #numerical b", "= float(sys.argv[2]) #numerical b = float(sys.argv[3]) #numerical ## print statements: print('input values are", "or 'div') a = float(sys.argv[2]) #numerical b = float(sys.argv[3]) #numerical ## print statements:", "with creating and uploading files to github etc. # This file imports funcitons", "imports funcitons div and add from math_lib.py import sys import math_lib as ml", "import sys import math_lib as ml ## --- input parameters ---(from command line", "## print statements: print('input values are '+ str(a) + ' and ' +", "' and ' + str(b)) ## --- if cmd == 'add': foo =", "Sept 2020 # Author: <NAME> # School assignment: For Assignment #0 of class", "Engineering for Scientists # Description: Intro to using GitHub Classroom. Practice with creating", "sys.argv[1] #command (can be 'add' or 'div') a = float(sys.argv[2]) #numerical b =", "funcitons div and add from math_lib.py import sys import math_lib as ml ##", "and add from math_lib.py import sys import math_lib as ml ## --- input", "sum is: '+ str(foo)) if cmd == 'div': foo = ml.div(a,b) print('first/second is:", "import math_lib as ml ## --- input parameters ---(from command line or shell", "etc. 
# This file imports funcitons div and add from math_lib.py import sys", "--- if cmd == 'add': foo = ml.add(a,b) print('the sum is: '+ str(foo))", "math_lib.py import sys import math_lib as ml ## --- input parameters ---(from command", "School assignment: For Assignment #0 of class MCDB6440: Software Engineering for Scientists #", "= float(sys.argv[3]) #numerical ## print statements: print('input values are '+ str(a) + '", "'div') a = float(sys.argv[2]) #numerical b = float(sys.argv[3]) #numerical ## print statements: print('input", "to using GitHub Classroom. Practice with creating and uploading files to github etc.", "+ ' and ' + str(b)) ## --- if cmd == 'add': foo", "'+ str(foo)) if cmd == 'div': foo = ml.div(a,b) print('first/second is: '+ str(foo))", "statements: print('input values are '+ str(a) + ' and ' + str(b)) ##", "print('the sum is: '+ str(foo)) if cmd == 'div': foo = ml.div(a,b) print('first/second", "(can be 'add' or 'div') a = float(sys.argv[2]) #numerical b = float(sys.argv[3]) #numerical", "command line or shell script) cmd = sys.argv[1] #command (can be 'add' or", "# Description: Intro to using GitHub Classroom. Practice with creating and uploading files", "calculate.py # Initial date: 4 Sept 2020 # Author: <NAME> # School assignment:", "' + str(b)) ## --- if cmd == 'add': foo = ml.add(a,b) print('the", "# File: calculate.py # Initial date: 4 Sept 2020 # Author: <NAME> #", "<NAME> # School assignment: For Assignment #0 of class MCDB6440: Software Engineering for", "--- input parameters ---(from command line or shell script) cmd = sys.argv[1] #command", "uploading files to github etc. # This file imports funcitons div and add", "str(b)) ## --- if cmd == 'add': foo = ml.add(a,b) print('the sum is:", "'add' or 'div') a = float(sys.argv[2]) #numerical b = float(sys.argv[3]) #numerical ## print", "from math_lib.py import sys import math_lib as ml ## --- input parameters ---(from", "creating and uploading files to github etc. 
# This file imports funcitons div", "or shell script) cmd = sys.argv[1] #command (can be 'add' or 'div') a", "if cmd == 'add': foo = ml.add(a,b) print('the sum is: '+ str(foo)) if" ]
[ "params=None) self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual( httpbroker.get('http://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'), {'title':", "scieloapi.exceptions.Timeout \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.Timeout() self.assertRaises(exceptions.Timeout, lambda: foo()) def", "endpoint='journals', resource_id='70'), {'title': 'foo'} ) def test_resource_id_makes_endpoint_mandatory(self): self.assertRaises( ValueError, lambda: httpbroker.get('http://manager.scielo.org/api/v1/', resource_id='70') )", "def foo(): raise requests.exceptions.RequestException() self.assertRaises(exceptions.HTTPError, lambda: foo()) class PrepareParamsFunctionTests(unittest.TestCase): def test_sort_dict_by_key(self): params =", "def test_from_Timeout_to_Timeout(self): \"\"\" from requests.exceptions.Timeout to scieloapi.exceptions.Timeout \"\"\" import requests @httpbroker.translate_exceptions def foo():", "foo(): raise requests.exceptions.HTTPError() self.assertRaises(exceptions.HTTPError, lambda: foo()) def test_from_Timeout_to_Timeout(self): \"\"\" from requests.exceptions.Timeout to scieloapi.exceptions.Timeout", "endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) def test_unexpected_status_code_raises_APIError(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.status_code", "mock_response = self.mocker.mock(requests.Response) mock_response.status_code self.mocker.result(410) self.mocker.count(3) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY, data='{\"title\": \"foo\"}')", "test_405_raises_NotFound(self): response = doubles.RequestsResponseStub() response.status_code = 405 
self.assertRaises(exceptions.MethodNotAllowed, lambda: httpbroker.check_http_status(response)) def test_406_raises_NotAcceptable(self): response", "ValueError, lambda: httpbroker.get('http://manager.scielo.org/api/v1/', resource_id='70') ) def test_https_turns_off_ca_cert_verification(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.json()", "= self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x: x['Content-Type'] == 'application/json'), data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests =", "= 405 self.assertRaises(exceptions.MethodNotAllowed, lambda: httpbroker.check_http_status(response)) def test_406_raises_NotAcceptable(self): response = doubles.RequestsResponseStub() response.status_code = 406", "= self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY, data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay()", "'http://manager.scielo.org/api/v1/journals/') def test_missing_scheme(self): path_segments = ['manager.scielo.org', 'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/') def test_https(self):", "from requests.exceptions.Timeout to scieloapi.exceptions.Timeout \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.Timeout() self.assertRaises(exceptions.Timeout,", "2), ('c', 3), ('username', 1)]) def test_None_returns_None(self): params = None self.assertIsNone(httpbroker.prepare_params(params)) class GetFunctionTests(mocker.MockerTestCase):", "= doubles.RequestsResponseStub() response.status_code = 200 self.assertIsNone(httpbroker.check_http_status(response)) class TranslateExceptionsTests(unittest.TestCase): def 
test_from_ConnectionError_to_ConnectionError(self): \"\"\" from requests.exceptions.ConnectionError", "3), ('username', 1)]) def test_None_returns_None(self): params = None self.assertIsNone(httpbroker.prepare_params(params)) class GetFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self):", "Content-Type header must be application/json \"\"\" import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location':", "import unittest import mocker from scieloapi import httpbroker, exceptions import doubles class CheckHttpStatusTests(unittest.TestCase):", "class CheckHttpStatusTests(unittest.TestCase): def test_400_raises_BadRequest(self): response = doubles.RequestsResponseStub() response.status_code = 400 self.assertRaises(exceptions.BadRequest, lambda: httpbroker.check_http_status(response))", "self.assertRaises(exceptions.MethodNotAllowed, lambda: httpbroker.check_http_status(response)) def test_406_raises_NotAcceptable(self): response = doubles.RequestsResponseStub() response.status_code = 406 self.assertRaises(exceptions.NotAcceptable, lambda:", "test_user_agent_is_properly_set(self): \"\"\" By properly I mean: scieloapi/:version, e.g. 
scieloapi/0.4 \"\"\" import requests mock_response", "exceptions import doubles class CheckHttpStatusTests(unittest.TestCase): def test_400_raises_BadRequest(self): response = doubles.RequestsResponseStub() response.status_code = 400", "doubles.RequestsResponseStub() response.status_code = 500 self.assertRaises(exceptions.InternalServerError, lambda: httpbroker.check_http_status(response)) def test_502_raises_BadGateway(self): response = doubles.RequestsResponseStub() response.status_code", "{'username': 1, 'api_key': 2, 'c': 3} self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2), ('c', 3), ('username', 1)])", "requests.exceptions.RequestException() self.assertRaises(exceptions.HTTPError, lambda: foo()) class PrepareParamsFunctionTests(unittest.TestCase): def test_sort_dict_by_key(self): params = {'username': 1, 'api_key':", "def test_sort_dict_by_key(self): params = {'username': 1, 'api_key': 2, 'c': 3} self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2),", "self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) def test_unexpected_status_code_raises_APIError(self): import requests", "mock_requests_get('http://manager.scielo.org/api/v1/journals/70/', headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')), params=None) self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual(", "502 self.assertRaises(exceptions.BadGateway, lambda: httpbroker.check_http_status(response)) def test_503_raises_ServiceUnavailable(self): response = doubles.RequestsResponseStub() response.status_code = 503 self.assertRaises(exceptions.ServiceUnavailable,", "scieloapi/0.4 \"\"\" import requests mock_response = self.mocker.mock(requests.Response) 
mock_response.json() self.mocker.result({'title': 'foo'}) mock_response.status_code self.mocker.result(200) mock_requests_get", "response.status_code = 401 self.assertRaises(exceptions.Unauthorized, lambda: httpbroker.check_http_status(response)) def test_403_raises_Forbidden(self): response = doubles.RequestsResponseStub() response.status_code =", "self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code self.mocker.result(201) self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x: x['Content-Type']", "lambda: foo()) def test_from_HTTPError_to_HTTPError(self): \"\"\" from requests.exceptions.HTTPError to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions", "e.g. scieloapi/0.4 \"\"\" import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code", "properly I mean: scieloapi/:version, e.g. 
scieloapi/0.4 \"\"\" import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers", "httpbroker, exceptions import doubles class CheckHttpStatusTests(unittest.TestCase): def test_400_raises_BadRequest(self): response = doubles.RequestsResponseStub() response.status_code =", "requests.exceptions.ConnectionError() self.assertRaises(exceptions.ConnectionError, lambda: foo()) def test_from_HTTPError_to_HTTPError(self): \"\"\" from requests.exceptions.HTTPError to scieloapi.exceptions.HTTPError \"\"\" import", "'foo'} ) class PostFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self): \"\"\" By properly I mean: scieloapi/:version, e.g.", "requests.exceptions.Timeout to scieloapi.exceptions.Timeout \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.Timeout() self.assertRaises(exceptions.Timeout, lambda:", "from requests.exceptions.RequestException to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.RequestException() self.assertRaises(exceptions.HTTPError,", "doubles.RequestsResponseStub() response.status_code = 404 self.assertRaises(exceptions.NotFound, lambda: httpbroker.check_http_status(response)) def test_405_raises_NotFound(self): response = doubles.RequestsResponseStub() response.status_code", "self.assertEqual( httpbroker.get('https://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'), {'title': 'foo'} ) class PostFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self): \"\"\" By", "1, 'api_key': 2, 'c': 3} self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2), ('c', 3), ('username', 1)]) def", "self.assertRaises( ValueError, lambda: httpbroker.get('http://manager.scielo.org/api/v1/', resource_id='70') ) def test_https_turns_off_ca_cert_verification(self): import requests mock_response = self.mocker.mock(requests.Response)", "= 
self.mocker.mock(requests.Response) mock_response.json() self.mocker.result({'title': 'foo'}) mock_response.status_code self.mocker.result(200) mock_requests_get = self.mocker.mock() mock_requests_get('https://manager.scielo.org/api/v1/journals/70/', headers=mocker.ANY, params=None,", "test_location_header_is_returned(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code self.mocker.result(201) self.mocker.count(2)", "self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code self.mocker.result(201) self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY, data='{\"title\": \"foo\"}')", "1), ('api_key', 2), ('c', 3)] self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2), ('c', 3), ('username', 1)]) def", "scieloapi import httpbroker, exceptions import doubles class CheckHttpStatusTests(unittest.TestCase): def test_400_raises_BadRequest(self): response = doubles.RequestsResponseStub()", "test_502_raises_BadGateway(self): response = doubles.RequestsResponseStub() response.status_code = 502 self.assertRaises(exceptions.BadGateway, lambda: httpbroker.check_http_status(response)) def test_503_raises_ServiceUnavailable(self): response", "mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x: x['Content-Type'] == 'application/json'), data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post", "3), ('username', 1)]) def test_sort_list_of_tuples(self): params = [('username', 1), ('api_key', 2), ('c', 3)]", "scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise 
requests.exceptions.RequestException() self.assertRaises(exceptions.HTTPError, lambda: foo()) class", "test_https_turns_off_ca_cert_verification(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.json() self.mocker.result({'title': 'foo'}) mock_response.status_code self.mocker.result(200) mock_requests_get =", "mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) def test_content_type_is_properly_set(self): \"\"\"", "response.status_code = 500 self.assertRaises(exceptions.InternalServerError, lambda: httpbroker.check_http_status(response)) def test_502_raises_BadGateway(self): response = doubles.RequestsResponseStub() response.status_code =", "response = doubles.RequestsResponseStub() response.status_code = 406 self.assertRaises(exceptions.NotAcceptable, lambda: httpbroker.check_http_status(response)) def test_500_raises_InternalServerError(self): response =", "mock_response.json() self.mocker.result({'title': 'foo'}) mock_response.status_code self.mocker.result(200) mock_requests_get = self.mocker.mock() mock_requests_get('http://manager.scielo.org/api/v1/journals/70/', headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')), params=None)", "headers=mocker.MATCH(lambda x: x['Content-Type'] == 'application/json'), data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post)", "def test_404_raises_NotFound(self): response = doubles.RequestsResponseStub() response.status_code = 404 self.assertRaises(exceptions.NotFound, lambda: httpbroker.check_http_status(response)) def test_405_raises_NotFound(self):", "headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')), data='{\"title\": \"foo\"}') 
self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual(", "httpbroker.check_http_status(response)) def test_503_raises_ServiceUnavailable(self): response = doubles.RequestsResponseStub() response.status_code = 503 self.assertRaises(exceptions.ServiceUnavailable, lambda: httpbroker.check_http_status(response)) def", "@httpbroker.translate_exceptions def foo(): raise requests.exceptions.TooManyRedirects() self.assertRaises(exceptions.HTTPError, lambda: foo()) def test_from_RequestException_to_HTTPError(self): \"\"\" from requests.exceptions.RequestException", "= self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) def", "def test_missing_trailing_slash(self): path_segments = ['http://manager.scielo.org', 'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/') def test_missing_scheme(self): path_segments", "self.assertRaises(exceptions.NotAcceptable, lambda: httpbroker.check_http_status(response)) def test_500_raises_InternalServerError(self): response = doubles.RequestsResponseStub() response.status_code = 500 self.assertRaises(exceptions.InternalServerError, lambda:", "doubles.RequestsResponseStub() response.status_code = 401 self.assertRaises(exceptions.Unauthorized, lambda: httpbroker.check_http_status(response)) def test_403_raises_Forbidden(self): response = doubles.RequestsResponseStub() response.status_code", "\"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) class MakeFullUrlFunctionTests(unittest.TestCase): def test_missing_trailing_slash(self): path_segments = ['http://manager.scielo.org', 'api', 'v1', 'journals']", 
"self.assertRaises(exceptions.HTTPError, lambda: foo()) def test_from_Timeout_to_Timeout(self): \"\"\" from requests.exceptions.Timeout to scieloapi.exceptions.Timeout \"\"\" import requests", "self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual( httpbroker.get('http://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'), {'title': 'foo'} ) def test_resource_id_makes_endpoint_mandatory(self): self.assertRaises( ValueError,", "to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.TooManyRedirects() self.assertRaises(exceptions.HTTPError, lambda: foo())", "self.mocker.count(3) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY, data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post", "\"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'),", "mock_response.status_code self.mocker.result(200) mock_requests_get = self.mocker.mock() mock_requests_get('https://manager.scielo.org/api/v1/journals/70/', headers=mocker.ANY, params=None, verify=False) self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests')", "CheckHttpStatusTests(unittest.TestCase): def test_400_raises_BadRequest(self): response = doubles.RequestsResponseStub() response.status_code = 400 self.assertRaises(exceptions.BadRequest, lambda: httpbroker.check_http_status(response)) def", "'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/') def test_https(self): path_segments = ['https://manager.scielo.org', 'api', 'v1', 
'journals']", "[('api_key', 2), ('c', 3), ('username', 1)]) def test_sort_list_of_tuples(self): params = [('username', 1), ('api_key',", "= 503 self.assertRaises(exceptions.ServiceUnavailable, lambda: httpbroker.check_http_status(response)) def test_200_returns_None(self): response = doubles.RequestsResponseStub() response.status_code = 200", "response = doubles.RequestsResponseStub() response.status_code = 403 self.assertRaises(exceptions.Forbidden, lambda: httpbroker.check_http_status(response)) def test_404_raises_NotFound(self): response =", "def test_401_raises_Unauthorized(self): response = doubles.RequestsResponseStub() response.status_code = 401 self.assertRaises(exceptions.Unauthorized, lambda: httpbroker.check_http_status(response)) def test_403_raises_Forbidden(self):", "404 self.assertRaises(exceptions.NotFound, lambda: httpbroker.check_http_status(response)) def test_405_raises_NotFound(self): response = doubles.RequestsResponseStub() response.status_code = 405 self.assertRaises(exceptions.MethodNotAllowed,", "response.status_code = 400 self.assertRaises(exceptions.BadRequest, lambda: httpbroker.check_http_status(response)) def test_401_raises_Unauthorized(self): response = doubles.RequestsResponseStub() response.status_code =", "[('api_key', 2), ('c', 3), ('username', 1)]) def test_None_returns_None(self): params = None self.assertIsNone(httpbroker.prepare_params(params)) class", "= 502 self.assertRaises(exceptions.BadGateway, lambda: httpbroker.check_http_status(response)) def test_503_raises_ServiceUnavailable(self): response = doubles.RequestsResponseStub() response.status_code = 503", "mean: scieloapi/:version, e.g. 
scieloapi/0.4 \"\"\" import requests mock_response = self.mocker.mock(requests.Response) mock_response.json() self.mocker.result({'title': 'foo'})", "test_200_returns_None(self): response = doubles.RequestsResponseStub() response.status_code = 200 self.assertIsNone(httpbroker.check_http_status(response)) class TranslateExceptionsTests(unittest.TestCase): def test_from_ConnectionError_to_ConnectionError(self): \"\"\"", "import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.HTTPError() self.assertRaises(exceptions.HTTPError, lambda: foo()) def test_from_Timeout_to_Timeout(self): \"\"\"", "'foo'} ) def test_resource_id_makes_endpoint_mandatory(self): self.assertRaises( ValueError, lambda: httpbroker.get('http://manager.scielo.org/api/v1/', resource_id='70') ) def test_https_turns_off_ca_cert_verification(self): import", "httpbroker.check_http_status(response)) def test_502_raises_BadGateway(self): response = doubles.RequestsResponseStub() response.status_code = 502 self.assertRaises(exceptions.BadGateway, lambda: httpbroker.check_http_status(response)) def", "def test_200_returns_None(self): response = doubles.RequestsResponseStub() response.status_code = 200 self.assertIsNone(httpbroker.check_http_status(response)) class TranslateExceptionsTests(unittest.TestCase): def test_from_ConnectionError_to_ConnectionError(self):", "406 self.assertRaises(exceptions.NotAcceptable, lambda: httpbroker.check_http_status(response)) def test_500_raises_InternalServerError(self): response = doubles.RequestsResponseStub() response.status_code = 500 self.assertRaises(exceptions.InternalServerError,", "response = doubles.RequestsResponseStub() response.status_code = 503 self.assertRaises(exceptions.ServiceUnavailable, lambda: httpbroker.check_http_status(response)) def test_200_returns_None(self): response =", "self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 
'http://manager.scielo.org/api/v1/journals/4/' ) def test_unexpected_status_code_raises_APIError(self): import requests mock_response =", "test_from_Timeout_to_Timeout(self): \"\"\" from requests.exceptions.Timeout to scieloapi.exceptions.Timeout \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise", "= 403 self.assertRaises(exceptions.Forbidden, lambda: httpbroker.check_http_status(response)) def test_404_raises_NotFound(self): response = doubles.RequestsResponseStub() response.status_code = 404", "properly I mean: scieloapi/:version, e.g. scieloapi/0.4 \"\"\" import requests mock_response = self.mocker.mock(requests.Response) mock_response.json()", "mock_requests_get = self.mocker.mock() mock_requests_get('http://manager.scielo.org/api/v1/journals/70/', headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')), params=None) self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.get", "foo(): raise requests.exceptions.TooManyRedirects() self.assertRaises(exceptions.HTTPError, lambda: foo()) def test_from_RequestException_to_HTTPError(self): \"\"\" from requests.exceptions.RequestException to scieloapi.exceptions.HTTPError", "params = None self.assertIsNone(httpbroker.prepare_params(params)) class GetFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self): \"\"\" By properly I mean:", "lambda: httpbroker.check_http_status(response)) def test_502_raises_BadGateway(self): response = doubles.RequestsResponseStub() response.status_code = 502 self.assertRaises(exceptions.BadGateway, lambda: httpbroker.check_http_status(response))", "test_from_RequestException_to_HTTPError(self): \"\"\" from requests.exceptions.RequestException to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise", "self.assertRaises(exceptions.HTTPError, lambda: foo()) class PrepareParamsFunctionTests(unittest.TestCase): def 
test_sort_dict_by_key(self): params = {'username': 1, 'api_key': 2,", "def test_403_raises_Forbidden(self): response = doubles.RequestsResponseStub() response.status_code = 403 self.assertRaises(exceptions.Forbidden, lambda: httpbroker.check_http_status(response)) def test_404_raises_NotFound(self):", "test_404_raises_NotFound(self): response = doubles.RequestsResponseStub() response.status_code = 404 self.assertRaises(exceptions.NotFound, lambda: httpbroker.check_http_status(response)) def test_405_raises_NotFound(self): response", "= doubles.RequestsResponseStub() response.status_code = 502 self.assertRaises(exceptions.BadGateway, lambda: httpbroker.check_http_status(response)) def test_503_raises_ServiceUnavailable(self): response = doubles.RequestsResponseStub()", "= self.mocker.mock() mock_requests_get('http://manager.scielo.org/api/v1/journals/70/', headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')), params=None) self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get)", "test_None_returns_None(self): params = None self.assertIsNone(httpbroker.prepare_params(params)) class GetFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self): \"\"\" By properly I", "self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/') def test_missing_scheme(self): path_segments = ['manager.scielo.org', 'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/') def", "x: x['User-Agent'].startswith('scieloapi/')), data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/',", ") class MakeFullUrlFunctionTests(unittest.TestCase): def 
test_missing_trailing_slash(self): path_segments = ['http://manager.scielo.org', 'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/')", "'foo'}) mock_response.status_code self.mocker.result(200) mock_requests_get = self.mocker.mock() mock_requests_get('https://manager.scielo.org/api/v1/journals/70/', headers=mocker.ANY, params=None, verify=False) self.mocker.result(mock_response) mock_requests =", "must be application/json \"\"\" import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2)", "httpbroker.check_http_status(response)) def test_401_raises_Unauthorized(self): response = doubles.RequestsResponseStub() response.status_code = 401 self.assertRaises(exceptions.Unauthorized, lambda: httpbroker.check_http_status(response)) def", "def test_location_header_is_returned(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code self.mocker.result(201)", "requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.HTTPError() self.assertRaises(exceptions.HTTPError, lambda: foo()) def test_from_Timeout_to_Timeout(self): \"\"\" from", "httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) class MakeFullUrlFunctionTests(unittest.TestCase): def test_missing_trailing_slash(self): path_segments = ['http://manager.scielo.org',", "self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) def test_content_type_is_properly_set(self): \"\"\" Content-Type 
header", "self.mocker.result({'title': 'foo'}) mock_response.status_code self.mocker.result(200) mock_requests_get = self.mocker.mock() mock_requests_get('https://manager.scielo.org/api/v1/journals/70/', headers=mocker.ANY, params=None, verify=False) self.mocker.result(mock_response) mock_requests", "self.assertRaises(exceptions.APIError, lambda: httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}') ) def test_location_header_is_returned(self): import requests mock_response =", "'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/') def test_missing_scheme(self): path_segments = ['manager.scielo.org', 'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/')", "verify=False) self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual( httpbroker.get('https://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'), {'title':", "doubles.RequestsResponseStub() response.status_code = 200 self.assertIsNone(httpbroker.check_http_status(response)) class TranslateExceptionsTests(unittest.TestCase): def test_from_ConnectionError_to_ConnectionError(self): \"\"\" from requests.exceptions.ConnectionError to", "mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY, data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertRaises(exceptions.APIError, lambda:", "test_missing_trailing_slash(self): path_segments = ['http://manager.scielo.org', 'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/') def 
test_missing_scheme(self): path_segments =", "= ['http://manager.scielo.org', 'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/') def test_missing_scheme(self): path_segments = ['manager.scielo.org', 'api',", "def test_502_raises_BadGateway(self): response = doubles.RequestsResponseStub() response.status_code = 502 self.assertRaises(exceptions.BadGateway, lambda: httpbroker.check_http_status(response)) def test_503_raises_ServiceUnavailable(self):", "mock_response.status_code self.mocker.result(200) mock_requests_get = self.mocker.mock() mock_requests_get('http://manager.scielo.org/api/v1/journals/70/', headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')), params=None) self.mocker.result(mock_response) mock_requests =", "response.status_code = 404 self.assertRaises(exceptions.NotFound, lambda: httpbroker.check_http_status(response)) def test_405_raises_NotFound(self): response = doubles.RequestsResponseStub() response.status_code =", "def test_missing_scheme(self): path_segments = ['manager.scielo.org', 'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/') def test_https(self): path_segments", "'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code self.mocker.result(201) self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')), data='{\"title\":", "self.mocker.result(201) self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')), data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests", "data='{\"title\": \"foo\"}'), 
'http://manager.scielo.org/api/v1/journals/4/' ) def test_content_type_is_properly_set(self): \"\"\" Content-Type header must be application/json \"\"\"", "mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x: x['Content-Type'] == 'application/json'), data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests", "to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.RequestException() self.assertRaises(exceptions.HTTPError, lambda: foo())", "= self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual( httpbroker.get('https://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'), {'title': 'foo'} ) class", "self.mocker.count(2) mock_response.status_code self.mocker.result(201) self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY, data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests", "= 401 self.assertRaises(exceptions.Unauthorized, lambda: httpbroker.check_http_status(response)) def test_403_raises_Forbidden(self): response = doubles.RequestsResponseStub() response.status_code = 403", "self.mocker.result(mock_requests_post) self.mocker.replay() self.assertRaises(exceptions.APIError, lambda: httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}') ) def test_location_header_is_returned(self): import requests", "= {'username': 1, 'api_key': 2, 'c': 3} self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2), ('c', 3), ('username',", "'http://manager.scielo.org/api/v1/journals/4/' ) def test_unexpected_status_code_raises_APIError(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.status_code 
self.mocker.result(410) self.mocker.count(3) mock_requests_post", "foo()) def test_from_RequestException_to_HTTPError(self): \"\"\" from requests.exceptions.RequestException to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def", "self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual( httpbroker.get('https://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'), {'title': 'foo'} ) class PostFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self):", "httpbroker.check_http_status(response)) def test_405_raises_NotFound(self): response = doubles.RequestsResponseStub() response.status_code = 405 self.assertRaises(exceptions.MethodNotAllowed, lambda: httpbroker.check_http_status(response)) def", ") def test_unexpected_status_code_raises_APIError(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.status_code self.mocker.result(410) self.mocker.count(3) mock_requests_post =", "class MakeFullUrlFunctionTests(unittest.TestCase): def test_missing_trailing_slash(self): path_segments = ['http://manager.scielo.org', 'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/') def", "self.mocker.result(200) mock_requests_get = self.mocker.mock() mock_requests_get('http://manager.scielo.org/api/v1/journals/70/', headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')), params=None) self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests')", "lambda: httpbroker.check_http_status(response)) def test_403_raises_Forbidden(self): response = doubles.RequestsResponseStub() response.status_code = 403 self.assertRaises(exceptions.Forbidden, lambda: httpbroker.check_http_status(response))", "x: x['User-Agent'].startswith('scieloapi/')), params=None) self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') 
mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual( httpbroker.get('http://manager.scielo.org/api/v1/', endpoint='journals',", "self.mocker.mock() mock_requests_get('http://manager.scielo.org/api/v1/journals/70/', headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')), params=None) self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay()", "scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.TooManyRedirects() self.assertRaises(exceptions.HTTPError, lambda: foo()) def", "test_resource_id_makes_endpoint_mandatory(self): self.assertRaises( ValueError, lambda: httpbroker.get('http://manager.scielo.org/api/v1/', resource_id='70') ) def test_https_turns_off_ca_cert_verification(self): import requests mock_response =", "'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/') def test_https(self): path_segments = ['https://manager.scielo.org', 'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments),", "200 self.assertIsNone(httpbroker.check_http_status(response)) class TranslateExceptionsTests(unittest.TestCase): def test_from_ConnectionError_to_ConnectionError(self): \"\"\" from requests.exceptions.ConnectionError to scieloapi.exceptions.ConnectionError \"\"\" import", "mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code self.mocker.result(201) self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x:", "@httpbroker.translate_exceptions def foo(): raise requests.exceptions.Timeout() self.assertRaises(exceptions.Timeout, lambda: foo()) def 
test_from_TooManyRedirects_to_HTTPError(self): \"\"\" from requests.exceptions.TooManyRedirects", "self.mocker.mock() mock_requests_get('https://manager.scielo.org/api/v1/journals/70/', headers=mocker.ANY, params=None, verify=False) self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual(", "response = doubles.RequestsResponseStub() response.status_code = 400 self.assertRaises(exceptions.BadRequest, lambda: httpbroker.check_http_status(response)) def test_401_raises_Unauthorized(self): response =", "self.assertRaises(exceptions.Timeout, lambda: foo()) def test_from_TooManyRedirects_to_HTTPError(self): \"\"\" from requests.exceptions.TooManyRedirects to scieloapi.exceptions.HTTPError \"\"\" import requests", "to scieloapi.exceptions.ConnectionError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.ConnectionError() self.assertRaises(exceptions.ConnectionError, lambda: foo())", "self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) def test_content_type_is_properly_set(self):", "resource_id='70') ) def test_https_turns_off_ca_cert_verification(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.json() self.mocker.result({'title': 'foo'}) mock_response.status_code", "data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) class MakeFullUrlFunctionTests(unittest.TestCase): def test_missing_trailing_slash(self): path_segments = ['http://manager.scielo.org', 'api', 'v1',", "self.mocker.count(2) mock_response.status_code self.mocker.result(201) self.mocker.count(2) mock_requests_post = self.mocker.mock() 
mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')), data='{\"title\": \"foo\"}')", "== 'application/json'), data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/',", "@httpbroker.translate_exceptions def foo(): raise requests.exceptions.HTTPError() self.assertRaises(exceptions.HTTPError, lambda: foo()) def test_from_Timeout_to_Timeout(self): \"\"\" from requests.exceptions.Timeout", "\"\"\" By properly I mean: scieloapi/:version, e.g. scieloapi/0.4 \"\"\" import requests mock_response =", "import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.Timeout() self.assertRaises(exceptions.Timeout, lambda: foo()) def test_from_TooManyRedirects_to_HTTPError(self): \"\"\"", "requests.exceptions.Timeout() self.assertRaises(exceptions.Timeout, lambda: foo()) def test_from_TooManyRedirects_to_HTTPError(self): \"\"\" from requests.exceptions.TooManyRedirects to scieloapi.exceptions.HTTPError \"\"\" import", "mock_response.status_code self.mocker.result(201) self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')), data='{\"title\": \"foo\"}') self.mocker.result(mock_response)", "3)] self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2), ('c', 3), ('username', 1)]) def test_None_returns_None(self): params = None", "scieloapi/:version, e.g. 
scieloapi/0.4 \"\"\" import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2)", "doubles.RequestsResponseStub() response.status_code = 400 self.assertRaises(exceptions.BadRequest, lambda: httpbroker.check_http_status(response)) def test_401_raises_Unauthorized(self): response = doubles.RequestsResponseStub() response.status_code", "def test_https_turns_off_ca_cert_verification(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.json() self.mocker.result({'title': 'foo'}) mock_response.status_code self.mocker.result(200) mock_requests_get", "mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertRaises(exceptions.APIError, lambda: httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}') )", "doubles.RequestsResponseStub() response.status_code = 403 self.assertRaises(exceptions.Forbidden, lambda: httpbroker.check_http_status(response)) def test_404_raises_NotFound(self): response = doubles.RequestsResponseStub() response.status_code", "1)]) def test_None_returns_None(self): params = None self.assertIsNone(httpbroker.prepare_params(params)) class GetFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self): \"\"\" By", "PostFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self): \"\"\" By properly I mean: scieloapi/:version, e.g. 
scieloapi/0.4 \"\"\" import", "self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) class MakeFullUrlFunctionTests(unittest.TestCase): def test_missing_trailing_slash(self): path_segments =", "def test_400_raises_BadRequest(self): response = doubles.RequestsResponseStub() response.status_code = 400 self.assertRaises(exceptions.BadRequest, lambda: httpbroker.check_http_status(response)) def test_401_raises_Unauthorized(self):", "self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code self.mocker.result(201) self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY,", "self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2), ('c', 3), ('username', 1)]) def test_sort_list_of_tuples(self): params = [('username', 1),", "response = doubles.RequestsResponseStub() response.status_code = 401 self.assertRaises(exceptions.Unauthorized, lambda: httpbroker.check_http_status(response)) def test_403_raises_Forbidden(self): response =", "response = doubles.RequestsResponseStub() response.status_code = 500 self.assertRaises(exceptions.InternalServerError, lambda: httpbroker.check_http_status(response)) def test_502_raises_BadGateway(self): response =", "self.mocker.mock(requests.Response) mock_response.json() self.mocker.result({'title': 'foo'}) mock_response.status_code self.mocker.result(200) mock_requests_get = self.mocker.mock() mock_requests_get('https://manager.scielo.org/api/v1/journals/70/', headers=mocker.ANY, params=None, verify=False)", "('c', 3), ('username', 1)]) def test_sort_list_of_tuples(self): params = [('username', 1), ('api_key', 2), ('c',", "lambda: httpbroker.check_http_status(response)) def 
test_401_raises_Unauthorized(self): response = doubles.RequestsResponseStub() response.status_code = 401 self.assertRaises(exceptions.Unauthorized, lambda: httpbroker.check_http_status(response))", "requests.exceptions.ConnectionError to scieloapi.exceptions.ConnectionError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.ConnectionError() self.assertRaises(exceptions.ConnectionError, lambda:", "unittest import mocker from scieloapi import httpbroker, exceptions import doubles class CheckHttpStatusTests(unittest.TestCase): def", "lambda: httpbroker.check_http_status(response)) def test_503_raises_ServiceUnavailable(self): response = doubles.RequestsResponseStub() response.status_code = 503 self.assertRaises(exceptions.ServiceUnavailable, lambda: httpbroker.check_http_status(response))", "lambda: foo()) def test_from_Timeout_to_Timeout(self): \"\"\" from requests.exceptions.Timeout to scieloapi.exceptions.Timeout \"\"\" import requests @httpbroker.translate_exceptions", "test_401_raises_Unauthorized(self): response = doubles.RequestsResponseStub() response.status_code = 401 self.assertRaises(exceptions.Unauthorized, lambda: httpbroker.check_http_status(response)) def test_403_raises_Forbidden(self): response", "@httpbroker.translate_exceptions def foo(): raise requests.exceptions.ConnectionError() self.assertRaises(exceptions.ConnectionError, lambda: foo()) def test_from_HTTPError_to_HTTPError(self): \"\"\" from requests.exceptions.HTTPError", "self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual( httpbroker.get('http://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'), {'title': 'foo'} ) def test_resource_id_makes_endpoint_mandatory(self):", "405 self.assertRaises(exceptions.MethodNotAllowed, lambda: httpbroker.check_http_status(response)) def test_406_raises_NotAcceptable(self): response = doubles.RequestsResponseStub() 
response.status_code = 406 self.assertRaises(exceptions.NotAcceptable,", "to scieloapi.exceptions.Timeout \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.Timeout() self.assertRaises(exceptions.Timeout, lambda: foo())", "foo(): raise requests.exceptions.Timeout() self.assertRaises(exceptions.Timeout, lambda: foo()) def test_from_TooManyRedirects_to_HTTPError(self): \"\"\" from requests.exceptions.TooManyRedirects to scieloapi.exceptions.HTTPError", "import httpbroker, exceptions import doubles class CheckHttpStatusTests(unittest.TestCase): def test_400_raises_BadRequest(self): response = doubles.RequestsResponseStub() response.status_code", "import doubles class CheckHttpStatusTests(unittest.TestCase): def test_400_raises_BadRequest(self): response = doubles.RequestsResponseStub() response.status_code = 400 self.assertRaises(exceptions.BadRequest,", "self.assertIsNone(httpbroker.check_http_status(response)) class TranslateExceptionsTests(unittest.TestCase): def test_from_ConnectionError_to_ConnectionError(self): \"\"\" from requests.exceptions.ConnectionError to scieloapi.exceptions.ConnectionError \"\"\" import requests", "('c', 3), ('username', 1)]) def test_None_returns_None(self): params = None self.assertIsNone(httpbroker.prepare_params(params)) class GetFunctionTests(mocker.MockerTestCase): def", "'http://manager.scielo.org/api/v1/journals/4/' ) class MakeFullUrlFunctionTests(unittest.TestCase): def test_missing_trailing_slash(self): path_segments = ['http://manager.scielo.org', 'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments),", "raise requests.exceptions.RequestException() self.assertRaises(exceptions.HTTPError, lambda: foo()) class PrepareParamsFunctionTests(unittest.TestCase): def test_sort_dict_by_key(self): params = {'username': 1,", "path_segments = ['http://manager.scielo.org', 'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 
'http://manager.scielo.org/api/v1/journals/') def test_missing_scheme(self): path_segments = ['manager.scielo.org',", "params = [('username', 1), ('api_key', 2), ('c', 3)] self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2), ('c', 3),", "endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) class MakeFullUrlFunctionTests(unittest.TestCase): def test_missing_trailing_slash(self): path_segments = ['http://manager.scielo.org', 'api',", "response = doubles.RequestsResponseStub() response.status_code = 502 self.assertRaises(exceptions.BadGateway, lambda: httpbroker.check_http_status(response)) def test_503_raises_ServiceUnavailable(self): response =", "self.assertEqual( httpbroker.get('http://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'), {'title': 'foo'} ) def test_resource_id_makes_endpoint_mandatory(self): self.assertRaises( ValueError, lambda: httpbroker.get('http://manager.scielo.org/api/v1/',", "import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.RequestException() self.assertRaises(exceptions.HTTPError, lambda: foo()) class PrepareParamsFunctionTests(unittest.TestCase): def", "\"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertRaises(exceptions.APIError, lambda: httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\":", "= 500 self.assertRaises(exceptions.InternalServerError, lambda: httpbroker.check_http_status(response)) def test_502_raises_BadGateway(self): response = doubles.RequestsResponseStub() response.status_code = 502", "self.assertRaises(exceptions.Forbidden, lambda: httpbroker.check_http_status(response)) def test_404_raises_NotFound(self): response = doubles.RequestsResponseStub() response.status_code = 404 self.assertRaises(exceptions.NotFound, lambda:", "'v1', 
'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/') def test_missing_scheme(self): path_segments = ['manager.scielo.org', 'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments),", "def test_content_type_is_properly_set(self): \"\"\" Content-Type header must be application/json \"\"\" import requests mock_response =", "endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) def test_content_type_is_properly_set(self): \"\"\" Content-Type header must be application/json", "self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual( httpbroker.get('http://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'), {'title': 'foo'}", "doubles.RequestsResponseStub() response.status_code = 405 self.assertRaises(exceptions.MethodNotAllowed, lambda: httpbroker.check_http_status(response)) def test_406_raises_NotAcceptable(self): response = doubles.RequestsResponseStub() response.status_code", "'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code self.mocker.result(201) self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY, data='{\"title\": \"foo\"}') self.mocker.result(mock_response)", "\"foo\"}') ) def test_location_header_is_returned(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2)", "test_406_raises_NotAcceptable(self): response = doubles.RequestsResponseStub() response.status_code = 406 self.assertRaises(exceptions.NotAcceptable, lambda: httpbroker.check_http_status(response)) def test_500_raises_InternalServerError(self): 
response", "foo()) def test_from_Timeout_to_Timeout(self): \"\"\" from requests.exceptions.Timeout to scieloapi.exceptions.Timeout \"\"\" import requests @httpbroker.translate_exceptions def", "headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')), params=None) self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual( httpbroker.get('http://manager.scielo.org/api/v1/',", "= doubles.RequestsResponseStub() response.status_code = 406 self.assertRaises(exceptions.NotAcceptable, lambda: httpbroker.check_http_status(response)) def test_500_raises_InternalServerError(self): response = doubles.RequestsResponseStub()", "httpbroker.check_http_status(response)) def test_200_returns_None(self): response = doubles.RequestsResponseStub() response.status_code = 200 self.assertIsNone(httpbroker.check_http_status(response)) class TranslateExceptionsTests(unittest.TestCase): def", "def test_resource_id_makes_endpoint_mandatory(self): self.assertRaises( ValueError, lambda: httpbroker.get('http://manager.scielo.org/api/v1/', resource_id='70') ) def test_https_turns_off_ca_cert_verification(self): import requests mock_response", "import mocker from scieloapi import httpbroker, exceptions import doubles class CheckHttpStatusTests(unittest.TestCase): def test_400_raises_BadRequest(self):", "response.status_code = 403 self.assertRaises(exceptions.Forbidden, lambda: httpbroker.check_http_status(response)) def test_404_raises_NotFound(self): response = doubles.RequestsResponseStub() response.status_code =", "mock_response.status_code self.mocker.result(410) self.mocker.count(3) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY, data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests =", "requests @httpbroker.translate_exceptions def foo(): raise 
requests.exceptions.ConnectionError() self.assertRaises(exceptions.ConnectionError, lambda: foo()) def test_from_HTTPError_to_HTTPError(self): \"\"\" from", "test_sort_dict_by_key(self): params = {'username': 1, 'api_key': 2, 'c': 3} self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2), ('c',", "<filename>tests/test_httpbroker.py import unittest import mocker from scieloapi import httpbroker, exceptions import doubles class", "def foo(): raise requests.exceptions.Timeout() self.assertRaises(exceptions.Timeout, lambda: foo()) def test_from_TooManyRedirects_to_HTTPError(self): \"\"\" from requests.exceptions.TooManyRedirects to", "data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) def test_unexpected_status_code_raises_APIError(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.status_code self.mocker.result(410)", "= self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) class", "By properly I mean: scieloapi/:version, e.g. 
scieloapi/0.4 \"\"\" import requests mock_response = self.mocker.mock(requests.Response)", "requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.TooManyRedirects() self.assertRaises(exceptions.HTTPError, lambda: foo()) def test_from_RequestException_to_HTTPError(self): \"\"\" from", "'api_key': 2, 'c': 3} self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2), ('c', 3), ('username', 1)]) def test_sort_list_of_tuples(self):", "mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')), data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests')", "'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/') def test_missing_scheme(self): path_segments = ['manager.scielo.org', 'api', 'v1', 'journals']", "to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.HTTPError() self.assertRaises(exceptions.HTTPError, lambda: foo())", "lambda: foo()) class PrepareParamsFunctionTests(unittest.TestCase): def test_sort_dict_by_key(self): params = {'username': 1, 'api_key': 2, 'c':", "3} self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2), ('c', 3), ('username', 1)]) def test_sort_list_of_tuples(self): params = [('username',", "headers=mocker.ANY, data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertRaises(exceptions.APIError, lambda: httpbroker.post('http://manager.scielo.org/api/v1/',", "test_unexpected_status_code_raises_APIError(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.status_code self.mocker.result(410) 
self.mocker.count(3) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/',", "MakeFullUrlFunctionTests(unittest.TestCase): def test_missing_trailing_slash(self): path_segments = ['http://manager.scielo.org', 'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/') def test_missing_scheme(self):", "\"\"\" from requests.exceptions.HTTPError to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.HTTPError()", "endpoint='journals', data='{\"title\": \"foo\"}') ) def test_location_header_is_returned(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location':", "\"\"\" from requests.exceptions.RequestException to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.RequestException()", "= ['manager.scielo.org', 'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/') def test_https(self): path_segments = ['https://manager.scielo.org', 'api',", "def test_sort_list_of_tuples(self): params = [('username', 1), ('api_key', 2), ('c', 3)] self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2),", "httpbroker.get('http://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'), {'title': 'foo'} ) def test_resource_id_makes_endpoint_mandatory(self): self.assertRaises( ValueError, lambda: httpbroker.get('http://manager.scielo.org/api/v1/', resource_id='70')", "response = doubles.RequestsResponseStub() response.status_code = 405 self.assertRaises(exceptions.MethodNotAllowed, lambda: httpbroker.check_http_status(response)) def test_406_raises_NotAcceptable(self): response =", "= [('username', 1), ('api_key', 2), ('c', 3)] 
self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2), ('c', 3), ('username',", "['manager.scielo.org', 'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/') def test_https(self): path_segments = ['https://manager.scielo.org', 'api', 'v1',", "I mean: scieloapi/:version, e.g. scieloapi/0.4 \"\"\" import requests mock_response = self.mocker.mock(requests.Response) mock_response.json() self.mocker.result({'title':", "path_segments = ['manager.scielo.org', 'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/') def test_https(self): path_segments = ['https://manager.scielo.org',", "= doubles.RequestsResponseStub() response.status_code = 405 self.assertRaises(exceptions.MethodNotAllowed, lambda: httpbroker.check_http_status(response)) def test_406_raises_NotAcceptable(self): response = doubles.RequestsResponseStub()", "= doubles.RequestsResponseStub() response.status_code = 500 self.assertRaises(exceptions.InternalServerError, lambda: httpbroker.check_http_status(response)) def test_502_raises_BadGateway(self): response = doubles.RequestsResponseStub()", "resource_id='70'), {'title': 'foo'} ) def test_resource_id_makes_endpoint_mandatory(self): self.assertRaises( ValueError, lambda: httpbroker.get('http://manager.scielo.org/api/v1/', resource_id='70') ) def", "x: x['Content-Type'] == 'application/json'), data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay()", "\"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) def test_unexpected_status_code_raises_APIError(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.status_code self.mocker.result(410) self.mocker.count(3)", "response = doubles.RequestsResponseStub() 
response.status_code = 404 self.assertRaises(exceptions.NotFound, lambda: httpbroker.check_http_status(response)) def test_405_raises_NotFound(self): response =", "\"\"\" import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code self.mocker.result(201) self.mocker.count(2)", "self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY, data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertRaises(exceptions.APIError,", "class GetFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self): \"\"\" By properly I mean: scieloapi/:version, e.g. scieloapi/0.4 \"\"\"", "requests mock_response = self.mocker.mock(requests.Response) mock_response.json() self.mocker.result({'title': 'foo'}) mock_response.status_code self.mocker.result(200) mock_requests_get = self.mocker.mock() mock_requests_get('http://manager.scielo.org/api/v1/journals/70/',", "@httpbroker.translate_exceptions def foo(): raise requests.exceptions.RequestException() self.assertRaises(exceptions.HTTPError, lambda: foo()) class PrepareParamsFunctionTests(unittest.TestCase): def test_sort_dict_by_key(self): params", "self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) def test_unexpected_status_code_raises_APIError(self): import requests mock_response", "mock_requests = self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual( httpbroker.get('https://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'), {'title': 'foo'} )", 
"['http://manager.scielo.org', 'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/') def test_missing_scheme(self): path_segments = ['manager.scielo.org', 'api', 'v1',", "scieloapi/0.4 \"\"\" import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code self.mocker.result(201)", "def test_from_HTTPError_to_HTTPError(self): \"\"\" from requests.exceptions.HTTPError to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def foo():", "requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.Timeout() self.assertRaises(exceptions.Timeout, lambda: foo()) def test_from_TooManyRedirects_to_HTTPError(self): \"\"\" from", "def test_from_RequestException_to_HTTPError(self): \"\"\" from requests.exceptions.RequestException to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def foo():", "test_503_raises_ServiceUnavailable(self): response = doubles.RequestsResponseStub() response.status_code = 503 self.assertRaises(exceptions.ServiceUnavailable, lambda: httpbroker.check_http_status(response)) def test_200_returns_None(self): response", "{'title': 'foo'} ) class PostFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self): \"\"\" By properly I mean: scieloapi/:version,", "scieloapi/:version, e.g. 
scieloapi/0.4 \"\"\" import requests mock_response = self.mocker.mock(requests.Response) mock_response.json() self.mocker.result({'title': 'foo'}) mock_response.status_code", "self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY, data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual(", "self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x: x['Content-Type'] == 'application/json'), data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests')", "\"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.RequestException() self.assertRaises(exceptions.HTTPError, lambda: foo()) class PrepareParamsFunctionTests(unittest.TestCase):", "\"\"\" Content-Type header must be application/json \"\"\" import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers", "= self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')), data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post", "= doubles.RequestsResponseStub() response.status_code = 401 self.assertRaises(exceptions.Unauthorized, lambda: httpbroker.check_http_status(response)) def test_403_raises_Forbidden(self): response = doubles.RequestsResponseStub()", "\"\"\" import requests mock_response = self.mocker.mock(requests.Response) mock_response.json() self.mocker.result({'title': 'foo'}) mock_response.status_code self.mocker.result(200) mock_requests_get =", "'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code 
self.mocker.result(201) self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x: x['Content-Type'] ==", "None self.assertIsNone(httpbroker.prepare_params(params)) class GetFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self): \"\"\" By properly I mean: scieloapi/:version, e.g.", "response.status_code = 405 self.assertRaises(exceptions.MethodNotAllowed, lambda: httpbroker.check_http_status(response)) def test_406_raises_NotAcceptable(self): response = doubles.RequestsResponseStub() response.status_code =", "503 self.assertRaises(exceptions.ServiceUnavailable, lambda: httpbroker.check_http_status(response)) def test_200_returns_None(self): response = doubles.RequestsResponseStub() response.status_code = 200 self.assertIsNone(httpbroker.check_http_status(response))", "def test_405_raises_NotFound(self): response = doubles.RequestsResponseStub() response.status_code = 405 self.assertRaises(exceptions.MethodNotAllowed, lambda: httpbroker.check_http_status(response)) def test_406_raises_NotAcceptable(self):", "500 self.assertRaises(exceptions.InternalServerError, lambda: httpbroker.check_http_status(response)) def test_502_raises_BadGateway(self): response = doubles.RequestsResponseStub() response.status_code = 502 self.assertRaises(exceptions.BadGateway,", "self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2), ('c', 3), ('username', 1)]) def test_None_returns_None(self): params = None self.assertIsNone(httpbroker.prepare_params(params))", ") def test_location_header_is_returned(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code", "lambda: foo()) def test_from_RequestException_to_HTTPError(self): \"\"\" from requests.exceptions.RequestException to 
scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions", "test_sort_list_of_tuples(self): params = [('username', 1), ('api_key', 2), ('c', 3)] self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2), ('c',", "httpbroker.get('https://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'), {'title': 'foo'} ) class PostFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self): \"\"\" By properly", "requests.exceptions.TooManyRedirects() self.assertRaises(exceptions.HTTPError, lambda: foo()) def test_from_RequestException_to_HTTPError(self): \"\"\" from requests.exceptions.RequestException to scieloapi.exceptions.HTTPError \"\"\" import", "self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual( httpbroker.get('https://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'), {'title': 'foo'} ) class PostFunctionTests(mocker.MockerTestCase):", "mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY, data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/',", "mock_response = self.mocker.mock(requests.Response) mock_response.json() self.mocker.result({'title': 'foo'}) mock_response.status_code self.mocker.result(200) mock_requests_get = self.mocker.mock() mock_requests_get('https://manager.scielo.org/api/v1/journals/70/', headers=mocker.ANY,", "mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) class MakeFullUrlFunctionTests(unittest.TestCase): def", "I mean: scieloapi/:version, e.g. 
scieloapi/0.4 \"\"\" import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location':", "resource_id='70'), {'title': 'foo'} ) class PostFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self): \"\"\" By properly I mean:", "mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code self.mocker.result(201) self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY, data='{\"title\":", "x['Content-Type'] == 'application/json'), data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual(", "raise requests.exceptions.HTTPError() self.assertRaises(exceptions.HTTPError, lambda: foo()) def test_from_Timeout_to_Timeout(self): \"\"\" from requests.exceptions.Timeout to scieloapi.exceptions.Timeout \"\"\"", ") def test_content_type_is_properly_set(self): \"\"\" Content-Type header must be application/json \"\"\" import requests mock_response", "= doubles.RequestsResponseStub() response.status_code = 400 self.assertRaises(exceptions.BadRequest, lambda: httpbroker.check_http_status(response)) def test_401_raises_Unauthorized(self): response = doubles.RequestsResponseStub()", "response.status_code = 502 self.assertRaises(exceptions.BadGateway, lambda: httpbroker.check_http_status(response)) def test_503_raises_ServiceUnavailable(self): response = doubles.RequestsResponseStub() response.status_code =", "test_from_HTTPError_to_HTTPError(self): \"\"\" from requests.exceptions.HTTPError to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise", "doubles class CheckHttpStatusTests(unittest.TestCase): def 
test_400_raises_BadRequest(self): response = doubles.RequestsResponseStub() response.status_code = 400 self.assertRaises(exceptions.BadRequest, lambda:", "endpoint='journals', resource_id='70'), {'title': 'foo'} ) class PostFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self): \"\"\" By properly I", "self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')), data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post)", "mock_response = self.mocker.mock(requests.Response) mock_response.json() self.mocker.result({'title': 'foo'}) mock_response.status_code self.mocker.result(200) mock_requests_get = self.mocker.mock() mock_requests_get('http://manager.scielo.org/api/v1/journals/70/', headers=mocker.MATCH(lambda", "self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) class MakeFullUrlFunctionTests(unittest.TestCase): def test_missing_trailing_slash(self): path_segments", "import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.ConnectionError() self.assertRaises(exceptions.ConnectionError, lambda: foo()) def test_from_HTTPError_to_HTTPError(self): \"\"\"", "doubles.RequestsResponseStub() response.status_code = 502 self.assertRaises(exceptions.BadGateway, lambda: httpbroker.check_http_status(response)) def test_503_raises_ServiceUnavailable(self): response = doubles.RequestsResponseStub() response.status_code", "self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertRaises(exceptions.APIError, lambda: httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}') ) 
def test_location_header_is_returned(self):", "data='{\"title\": \"foo\"}') ) def test_location_header_is_returned(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'})", "scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.HTTPError() self.assertRaises(exceptions.HTTPError, lambda: foo()) def", "self.assertRaises(exceptions.InternalServerError, lambda: httpbroker.check_http_status(response)) def test_502_raises_BadGateway(self): response = doubles.RequestsResponseStub() response.status_code = 502 self.assertRaises(exceptions.BadGateway, lambda:", "self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x: x['Content-Type'] == 'application/json'), data='{\"title\": \"foo\"}') self.mocker.result(mock_response)", "2, 'c': 3} self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2), ('c', 3), ('username', 1)]) def test_sort_list_of_tuples(self): params", "lambda: foo()) def test_from_TooManyRedirects_to_HTTPError(self): \"\"\" from requests.exceptions.TooManyRedirects to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions", "self.mocker.replay() self.assertRaises(exceptions.APIError, lambda: httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}') ) def test_location_header_is_returned(self): import requests mock_response", ") class PostFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self): \"\"\" By properly I mean: scieloapi/:version, e.g. 
scieloapi/0.4", "data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertRaises(exceptions.APIError, lambda: httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals',", "lambda: httpbroker.get('http://manager.scielo.org/api/v1/', resource_id='70') ) def test_https_turns_off_ca_cert_verification(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.json() self.mocker.result({'title':", "lambda: httpbroker.check_http_status(response)) def test_500_raises_InternalServerError(self): response = doubles.RequestsResponseStub() response.status_code = 500 self.assertRaises(exceptions.InternalServerError, lambda: httpbroker.check_http_status(response))", "lambda: httpbroker.check_http_status(response)) def test_405_raises_NotFound(self): response = doubles.RequestsResponseStub() response.status_code = 405 self.assertRaises(exceptions.MethodNotAllowed, lambda: httpbroker.check_http_status(response))", "403 self.assertRaises(exceptions.Forbidden, lambda: httpbroker.check_http_status(response)) def test_404_raises_NotFound(self): response = doubles.RequestsResponseStub() response.status_code = 404 self.assertRaises(exceptions.NotFound,", "400 self.assertRaises(exceptions.BadRequest, lambda: httpbroker.check_http_status(response)) def test_401_raises_Unauthorized(self): response = doubles.RequestsResponseStub() response.status_code = 401 self.assertRaises(exceptions.Unauthorized,", "requests.exceptions.RequestException to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.RequestException() self.assertRaises(exceptions.HTTPError, lambda:", "response.status_code = 200 self.assertIsNone(httpbroker.check_http_status(response)) class TranslateExceptionsTests(unittest.TestCase): def 
test_from_ConnectionError_to_ConnectionError(self): \"\"\" from requests.exceptions.ConnectionError to scieloapi.exceptions.ConnectionError", "scieloapi.exceptions.ConnectionError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.ConnectionError() self.assertRaises(exceptions.ConnectionError, lambda: foo()) def", "foo(): raise requests.exceptions.RequestException() self.assertRaises(exceptions.HTTPError, lambda: foo()) class PrepareParamsFunctionTests(unittest.TestCase): def test_sort_dict_by_key(self): params = {'username':", "httpbroker.check_http_status(response)) def test_406_raises_NotAcceptable(self): response = doubles.RequestsResponseStub() response.status_code = 406 self.assertRaises(exceptions.NotAcceptable, lambda: httpbroker.check_http_status(response)) def", "'foo'}) mock_response.status_code self.mocker.result(200) mock_requests_get = self.mocker.mock() mock_requests_get('http://manager.scielo.org/api/v1/journals/70/', headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')), params=None) self.mocker.result(mock_response) mock_requests", ") def test_https_turns_off_ca_cert_verification(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.json() self.mocker.result({'title': 'foo'}) mock_response.status_code self.mocker.result(200)", "[('username', 1), ('api_key', 2), ('c', 3)] self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2), ('c', 3), ('username', 1)])", "self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code self.mocker.result(201) self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')),", "\"\"\" from requests.exceptions.Timeout to scieloapi.exceptions.Timeout \"\"\" import requests @httpbroker.translate_exceptions def 
foo(): raise requests.exceptions.Timeout()", "class PostFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self): \"\"\" By properly I mean: scieloapi/:version, e.g. scieloapi/0.4 \"\"\"", "mock_response.status_code self.mocker.result(201) self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY, data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests =", "requests.exceptions.HTTPError() self.assertRaises(exceptions.HTTPError, lambda: foo()) def test_from_Timeout_to_Timeout(self): \"\"\" from requests.exceptions.Timeout to scieloapi.exceptions.Timeout \"\"\" import", "from requests.exceptions.TooManyRedirects to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.TooManyRedirects() self.assertRaises(exceptions.HTTPError,", "def foo(): raise requests.exceptions.TooManyRedirects() self.assertRaises(exceptions.HTTPError, lambda: foo()) def test_from_RequestException_to_HTTPError(self): \"\"\" from requests.exceptions.RequestException to", "headers=mocker.ANY, params=None, verify=False) self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual( httpbroker.get('https://manager.scielo.org/api/v1/', endpoint='journals',", "\"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) def test_content_type_is_properly_set(self): \"\"\" Content-Type header must be application/json \"\"\" import", "def test_from_ConnectionError_to_ConnectionError(self): \"\"\" from requests.exceptions.ConnectionError to scieloapi.exceptions.ConnectionError \"\"\" import requests @httpbroker.translate_exceptions def foo():", "\"\"\" from requests.exceptions.ConnectionError to scieloapi.exceptions.ConnectionError \"\"\" import requests @httpbroker.translate_exceptions def 
foo(): raise requests.exceptions.ConnectionError()", "'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/') def test_https(self): path_segments = ['https://manager.scielo.org', 'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'https://manager.scielo.org/api/v1/journals/')", "self.mocker.mock(requests.Response) mock_response.json() self.mocker.result({'title': 'foo'}) mock_response.status_code self.mocker.result(200) mock_requests_get = self.mocker.mock() mock_requests_get('http://manager.scielo.org/api/v1/journals/70/', headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')),", "self.mocker.replay() self.assertEqual( httpbroker.get('http://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'), {'title': 'foo'} ) def test_resource_id_makes_endpoint_mandatory(self): self.assertRaises( ValueError, lambda:", "= self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code self.mocker.result(201) self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/',", "raise requests.exceptions.ConnectionError() self.assertRaises(exceptions.ConnectionError, lambda: foo()) def test_from_HTTPError_to_HTTPError(self): \"\"\" from requests.exceptions.HTTPError to scieloapi.exceptions.HTTPError \"\"\"", "httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}') ) def test_location_header_is_returned(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers", "foo()) class PrepareParamsFunctionTests(unittest.TestCase): def test_sort_dict_by_key(self): params = {'username': 1, 'api_key': 2, 'c': 3}", "test_from_TooManyRedirects_to_HTTPError(self): \"\"\" from 
requests.exceptions.TooManyRedirects to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise", "requests mock_response = self.mocker.mock(requests.Response) mock_response.json() self.mocker.result({'title': 'foo'}) mock_response.status_code self.mocker.result(200) mock_requests_get = self.mocker.mock() mock_requests_get('https://manager.scielo.org/api/v1/journals/70/',", "response.status_code = 406 self.assertRaises(exceptions.NotAcceptable, lambda: httpbroker.check_http_status(response)) def test_500_raises_InternalServerError(self): response = doubles.RequestsResponseStub() response.status_code =", "= 200 self.assertIsNone(httpbroker.check_http_status(response)) class TranslateExceptionsTests(unittest.TestCase): def test_from_ConnectionError_to_ConnectionError(self): \"\"\" from requests.exceptions.ConnectionError to scieloapi.exceptions.ConnectionError \"\"\"", "self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY, data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post", "mean: scieloapi/:version, e.g. 
scieloapi/0.4 \"\"\" import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'})", "mock_requests_get('https://manager.scielo.org/api/v1/journals/70/', headers=mocker.ANY, params=None, verify=False) self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual( httpbroker.get('https://manager.scielo.org/api/v1/',", "mock_response.json() self.mocker.result({'title': 'foo'}) mock_response.status_code self.mocker.result(200) mock_requests_get = self.mocker.mock() mock_requests_get('https://manager.scielo.org/api/v1/journals/70/', headers=mocker.ANY, params=None, verify=False) self.mocker.result(mock_response)", "mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' )", "('username', 1)]) def test_sort_list_of_tuples(self): params = [('username', 1), ('api_key', 2), ('c', 3)] self.assertEqual(httpbroker.prepare_params(params),", "TranslateExceptionsTests(unittest.TestCase): def test_from_ConnectionError_to_ConnectionError(self): \"\"\" from requests.exceptions.ConnectionError to scieloapi.exceptions.ConnectionError \"\"\" import requests @httpbroker.translate_exceptions def", "def foo(): raise requests.exceptions.ConnectionError() self.assertRaises(exceptions.ConnectionError, lambda: foo()) def test_from_HTTPError_to_HTTPError(self): \"\"\" from requests.exceptions.HTTPError to", "mock_response = self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code self.mocker.result(201) 
self.mocker.count(2) mock_requests_post = self.mocker.mock()", "mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')), data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay()", "self.assertRaises(exceptions.NotFound, lambda: httpbroker.check_http_status(response)) def test_405_raises_NotFound(self): response = doubles.RequestsResponseStub() response.status_code = 405 self.assertRaises(exceptions.MethodNotAllowed, lambda:", "httpbroker.check_http_status(response)) def test_500_raises_InternalServerError(self): response = doubles.RequestsResponseStub() response.status_code = 500 self.assertRaises(exceptions.InternalServerError, lambda: httpbroker.check_http_status(response)) def", "import requests mock_response = self.mocker.mock(requests.Response) mock_response.json() self.mocker.result({'title': 'foo'}) mock_response.status_code self.mocker.result(200) mock_requests_get = self.mocker.mock()", "test_missing_scheme(self): path_segments = ['manager.scielo.org', 'api', 'v1', 'journals'] self.assertEqual(httpbroker._make_full_url(*path_segments), 'http://manager.scielo.org/api/v1/journals/') def test_https(self): path_segments =", "= self.mocker.mock(requests.Response) mock_response.json() self.mocker.result({'title': 'foo'}) mock_response.status_code self.mocker.result(200) mock_requests_get = self.mocker.mock() mock_requests_get('http://manager.scielo.org/api/v1/journals/70/', headers=mocker.MATCH(lambda x:", "('username', 1)]) def test_None_returns_None(self): params = None self.assertIsNone(httpbroker.prepare_params(params)) class GetFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self): \"\"\"", "2), ('c', 3)] self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2), ('c', 3), ('username', 1)]) def 
test_None_returns_None(self): params", "self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertRaises(exceptions.APIError, lambda: httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}')", "\"\"\" from requests.exceptions.TooManyRedirects to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.TooManyRedirects()", "be application/json \"\"\" import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code", "self.mocker.replay() self.assertEqual( httpbroker.get('https://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'), {'title': 'foo'} ) class PostFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self): \"\"\"", "raise requests.exceptions.Timeout() self.assertRaises(exceptions.Timeout, lambda: foo()) def test_from_TooManyRedirects_to_HTTPError(self): \"\"\" from requests.exceptions.TooManyRedirects to scieloapi.exceptions.HTTPError \"\"\"", "raise requests.exceptions.TooManyRedirects() self.assertRaises(exceptions.HTTPError, lambda: foo()) def test_from_RequestException_to_HTTPError(self): \"\"\" from requests.exceptions.RequestException to scieloapi.exceptions.HTTPError \"\"\"", "\"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.ConnectionError() self.assertRaises(exceptions.ConnectionError, lambda: foo()) def test_from_HTTPError_to_HTTPError(self):", "import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code self.mocker.result(201) 
self.mocker.count(2) mock_requests_post", "doubles.RequestsResponseStub() response.status_code = 503 self.assertRaises(exceptions.ServiceUnavailable, lambda: httpbroker.check_http_status(response)) def test_200_returns_None(self): response = doubles.RequestsResponseStub() response.status_code", "def foo(): raise requests.exceptions.HTTPError() self.assertRaises(exceptions.HTTPError, lambda: foo()) def test_from_Timeout_to_Timeout(self): \"\"\" from requests.exceptions.Timeout to", "test_content_type_is_properly_set(self): \"\"\" Content-Type header must be application/json \"\"\" import requests mock_response = self.mocker.mock(requests.Response)", "self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) class MakeFullUrlFunctionTests(unittest.TestCase): def test_missing_trailing_slash(self):", "response = doubles.RequestsResponseStub() response.status_code = 200 self.assertIsNone(httpbroker.check_http_status(response)) class TranslateExceptionsTests(unittest.TestCase): def test_from_ConnectionError_to_ConnectionError(self): \"\"\" from", "httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) def test_unexpected_status_code_raises_APIError(self): import requests mock_response = self.mocker.mock(requests.Response)", "doubles.RequestsResponseStub() response.status_code = 406 self.assertRaises(exceptions.NotAcceptable, lambda: httpbroker.check_http_status(response)) def test_500_raises_InternalServerError(self): response = doubles.RequestsResponseStub() response.status_code", "params=None, verify=False) self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual( 
httpbroker.get('https://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'),", "GetFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self): \"\"\" By properly I mean: scieloapi/:version, e.g. scieloapi/0.4 \"\"\" import", "x['User-Agent'].startswith('scieloapi/')), data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals',", "'c': 3} self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2), ('c', 3), ('username', 1)]) def test_sort_list_of_tuples(self): params =", "= self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual( httpbroker.get('http://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'), {'title': 'foo'} ) def", "class TranslateExceptionsTests(unittest.TestCase): def test_from_ConnectionError_to_ConnectionError(self): \"\"\" from requests.exceptions.ConnectionError to scieloapi.exceptions.ConnectionError \"\"\" import requests @httpbroker.translate_exceptions", "foo(): raise requests.exceptions.ConnectionError() self.assertRaises(exceptions.ConnectionError, lambda: foo()) def test_from_HTTPError_to_HTTPError(self): \"\"\" from requests.exceptions.HTTPError to scieloapi.exceptions.HTTPError", "mock_requests = self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual( httpbroker.get('http://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'), {'title': 'foo'} )", "params = {'username': 1, 'api_key': 2, 'c': 3} self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2), ('c', 3),", "def test_from_TooManyRedirects_to_HTTPError(self): \"\"\" from requests.exceptions.TooManyRedirects to scieloapi.exceptions.HTTPError 
\"\"\" import requests @httpbroker.translate_exceptions def foo():", "httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) def test_content_type_is_properly_set(self): \"\"\" Content-Type header must be", "response.status_code = 503 self.assertRaises(exceptions.ServiceUnavailable, lambda: httpbroker.check_http_status(response)) def test_200_returns_None(self): response = doubles.RequestsResponseStub() response.status_code =", "lambda: httpbroker.check_http_status(response)) def test_404_raises_NotFound(self): response = doubles.RequestsResponseStub() response.status_code = 404 self.assertRaises(exceptions.NotFound, lambda: httpbroker.check_http_status(response))", "('api_key', 2), ('c', 3)] self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2), ('c', 3), ('username', 1)]) def test_None_returns_None(self):", "= 400 self.assertRaises(exceptions.BadRequest, lambda: httpbroker.check_http_status(response)) def test_401_raises_Unauthorized(self): response = doubles.RequestsResponseStub() response.status_code = 401", "= 404 self.assertRaises(exceptions.NotFound, lambda: httpbroker.check_http_status(response)) def test_405_raises_NotFound(self): response = doubles.RequestsResponseStub() response.status_code = 405", "'http://manager.scielo.org/api/v1/journals/4/' ) def test_content_type_is_properly_set(self): \"\"\" Content-Type header must be application/json \"\"\" import requests", "import requests mock_response = self.mocker.mock(requests.Response) mock_response.status_code self.mocker.result(410) self.mocker.count(3) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY,", "= self.mocker.mock() mock_requests_get('https://manager.scielo.org/api/v1/journals/70/', headers=mocker.ANY, params=None, verify=False) self.mocker.result(mock_response) mock_requests = 
self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay()", "test_403_raises_Forbidden(self): response = doubles.RequestsResponseStub() response.status_code = 403 self.assertRaises(exceptions.Forbidden, lambda: httpbroker.check_http_status(response)) def test_404_raises_NotFound(self): response", "self.mocker.result(201) self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY, data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests')", "= self.mocker.mock(requests.Response) mock_response.status_code self.mocker.result(410) self.mocker.count(3) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY, data='{\"title\": \"foo\"}') self.mocker.result(mock_response)", "test_from_ConnectionError_to_ConnectionError(self): \"\"\" from requests.exceptions.ConnectionError to scieloapi.exceptions.ConnectionError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise", "self.assertRaises(exceptions.ServiceUnavailable, lambda: httpbroker.check_http_status(response)) def test_200_returns_None(self): response = doubles.RequestsResponseStub() response.status_code = 200 self.assertIsNone(httpbroker.check_http_status(response)) class", "headers=mocker.ANY, data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals',", "PrepareParamsFunctionTests(unittest.TestCase): def test_sort_dict_by_key(self): params = {'username': 1, 'api_key': 2, 'c': 3} self.assertEqual(httpbroker.prepare_params(params), [('api_key',", "self.mocker.result(mock_response) mock_requests = 
self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual( httpbroker.get('https://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'), {'title': 'foo'}", "self.mocker.count(2) mock_response.status_code self.mocker.result(201) self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x: x['Content-Type'] == 'application/json'),", "import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.TooManyRedirects() self.assertRaises(exceptions.HTTPError, lambda: foo()) def test_from_RequestException_to_HTTPError(self): \"\"\"", "def test_406_raises_NotAcceptable(self): response = doubles.RequestsResponseStub() response.status_code = 406 self.assertRaises(exceptions.NotAcceptable, lambda: httpbroker.check_http_status(response)) def test_500_raises_InternalServerError(self):", "self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) class MakeFullUrlFunctionTests(unittest.TestCase):", "lambda: httpbroker.check_http_status(response)) def test_406_raises_NotAcceptable(self): response = doubles.RequestsResponseStub() response.status_code = 406 self.assertRaises(exceptions.NotAcceptable, lambda: httpbroker.check_http_status(response))", "self.mocker.result({'title': 'foo'}) mock_response.status_code self.mocker.result(200) mock_requests_get = self.mocker.mock() mock_requests_get('http://manager.scielo.org/api/v1/journals/70/', headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')), params=None) self.mocker.result(mock_response)", "self.assertIsNone(httpbroker.prepare_params(params)) class GetFunctionTests(mocker.MockerTestCase): def 
test_user_agent_is_properly_set(self): \"\"\" By properly I mean: scieloapi/:version, e.g. scieloapi/0.4", "x['User-Agent'].startswith('scieloapi/')), params=None) self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual( httpbroker.get('http://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'),", "= doubles.RequestsResponseStub() response.status_code = 503 self.assertRaises(exceptions.ServiceUnavailable, lambda: httpbroker.check_http_status(response)) def test_200_returns_None(self): response = doubles.RequestsResponseStub()", "2), ('c', 3), ('username', 1)]) def test_sort_list_of_tuples(self): params = [('username', 1), ('api_key', 2),", "401 self.assertRaises(exceptions.Unauthorized, lambda: httpbroker.check_http_status(response)) def test_403_raises_Forbidden(self): response = doubles.RequestsResponseStub() response.status_code = 403 self.assertRaises(exceptions.Forbidden,", "def test_None_returns_None(self): params = None self.assertIsNone(httpbroker.prepare_params(params)) class GetFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self): \"\"\" By properly", "self.assertRaises(exceptions.BadRequest, lambda: httpbroker.check_http_status(response)) def test_401_raises_Unauthorized(self): response = doubles.RequestsResponseStub() response.status_code = 401 self.assertRaises(exceptions.Unauthorized, lambda:", "mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) def test_unexpected_status_code_raises_APIError(self): import", "requests mock_response = self.mocker.mock(requests.Response) mock_response.status_code self.mocker.result(410) self.mocker.count(3) mock_requests_post = self.mocker.mock() 
mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY, data='{\"title\":", "self.assertRaises(exceptions.BadGateway, lambda: httpbroker.check_http_status(response)) def test_503_raises_ServiceUnavailable(self): response = doubles.RequestsResponseStub() response.status_code = 503 self.assertRaises(exceptions.ServiceUnavailable, lambda:", "application/json \"\"\" import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code self.mocker.result(201)", "= doubles.RequestsResponseStub() response.status_code = 403 self.assertRaises(exceptions.Forbidden, lambda: httpbroker.check_http_status(response)) def test_404_raises_NotFound(self): response = doubles.RequestsResponseStub()", "httpbroker.check_http_status(response)) def test_403_raises_Forbidden(self): response = doubles.RequestsResponseStub() response.status_code = 403 self.assertRaises(exceptions.Forbidden, lambda: httpbroker.check_http_status(response)) def", "foo()) def test_from_TooManyRedirects_to_HTTPError(self): \"\"\" from requests.exceptions.TooManyRedirects to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def", "\"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.Timeout() self.assertRaises(exceptions.Timeout, lambda: foo()) def test_from_TooManyRedirects_to_HTTPError(self):", "self.mocker.mock(requests.Response) mock_response.status_code self.mocker.result(410) self.mocker.count(3) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY, data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests", "lambda: httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}') ) def test_location_header_is_returned(self): 
import requests mock_response = self.mocker.mock(requests.Response)", "test_400_raises_BadRequest(self): response = doubles.RequestsResponseStub() response.status_code = 400 self.assertRaises(exceptions.BadRequest, lambda: httpbroker.check_http_status(response)) def test_401_raises_Unauthorized(self): response", "mock_response.status_code self.mocker.result(201) self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x: x['Content-Type'] == 'application/json'), data='{\"title\":", "def test_unexpected_status_code_raises_APIError(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.status_code self.mocker.result(410) self.mocker.count(3) mock_requests_post = self.mocker.mock()", "= 406 self.assertRaises(exceptions.NotAcceptable, lambda: httpbroker.check_http_status(response)) def test_500_raises_InternalServerError(self): response = doubles.RequestsResponseStub() response.status_code = 500", "mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual( httpbroker.get('http://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'), {'title': 'foo'} ) def test_resource_id_makes_endpoint_mandatory(self): self.assertRaises(", "def test_user_agent_is_properly_set(self): \"\"\" By properly I mean: scieloapi/:version, e.g. 
scieloapi/0.4 \"\"\" import requests", "httpbroker.get('http://manager.scielo.org/api/v1/', resource_id='70') ) def test_https_turns_off_ca_cert_verification(self): import requests mock_response = self.mocker.mock(requests.Response) mock_response.json() self.mocker.result({'title': 'foo'})", "from requests.exceptions.ConnectionError to scieloapi.exceptions.ConnectionError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.ConnectionError() self.assertRaises(exceptions.ConnectionError,", "'application/json'), data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals',", "= doubles.RequestsResponseStub() response.status_code = 404 self.assertRaises(exceptions.NotFound, lambda: httpbroker.check_http_status(response)) def test_405_raises_NotFound(self): response = doubles.RequestsResponseStub()", "self.assertRaises(exceptions.Unauthorized, lambda: httpbroker.check_http_status(response)) def test_403_raises_Forbidden(self): response = doubles.RequestsResponseStub() response.status_code = 403 self.assertRaises(exceptions.Forbidden, lambda:", "self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')), data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests =", "requests.exceptions.TooManyRedirects to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.TooManyRedirects() self.assertRaises(exceptions.HTTPError, lambda:", "mock_requests_get = self.mocker.mock() mock_requests_get('https://manager.scielo.org/api/v1/journals/70/', headers=mocker.ANY, params=None, verify=False) 
self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.get self.mocker.result(mock_requests_get)", "self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code self.mocker.result(201) self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda", "= self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertRaises(exceptions.APIError, lambda: httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}') ) def", "class PrepareParamsFunctionTests(unittest.TestCase): def test_sort_dict_by_key(self): params = {'username': 1, 'api_key': 2, 'c': 3} self.assertEqual(httpbroker.prepare_params(params),", "from scieloapi import httpbroker, exceptions import doubles class CheckHttpStatusTests(unittest.TestCase): def test_400_raises_BadRequest(self): response =", "mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertRaises(exceptions.APIError, lambda: httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}') ) def test_location_header_is_returned(self): import", "mock_requests.get self.mocker.result(mock_requests_get) self.mocker.replay() self.assertEqual( httpbroker.get('https://manager.scielo.org/api/v1/', endpoint='journals', resource_id='70'), {'title': 'foo'} ) class PostFunctionTests(mocker.MockerTestCase): def", "1)]) def test_sort_list_of_tuples(self): params = [('username', 1), ('api_key', 2), ('c', 3)] self.assertEqual(httpbroker.prepare_params(params), [('api_key',", "self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( 
httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) def test_unexpected_status_code_raises_APIError(self):", "self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) def test_content_type_is_properly_set(self): \"\"\" Content-Type header must", "\"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.HTTPError() self.assertRaises(exceptions.HTTPError, lambda: foo()) def test_from_Timeout_to_Timeout(self):", "test_500_raises_InternalServerError(self): response = doubles.RequestsResponseStub() response.status_code = 500 self.assertRaises(exceptions.InternalServerError, lambda: httpbroker.check_http_status(response)) def test_502_raises_BadGateway(self): response", "def test_503_raises_ServiceUnavailable(self): response = doubles.RequestsResponseStub() response.status_code = 503 self.assertRaises(exceptions.ServiceUnavailable, lambda: httpbroker.check_http_status(response)) def test_200_returns_None(self):", "requests mock_response = self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'}) self.mocker.count(2) mock_response.status_code self.mocker.result(201) self.mocker.count(2) mock_requests_post =", "self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/'", "self.mocker.result(410) self.mocker.count(3) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY, data='{\"title\": \"foo\"}') 
self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests')", "lambda: httpbroker.check_http_status(response)) def test_200_returns_None(self): response = doubles.RequestsResponseStub() response.status_code = 200 self.assertIsNone(httpbroker.check_http_status(response)) class TranslateExceptionsTests(unittest.TestCase):", "('c', 3)] self.assertEqual(httpbroker.prepare_params(params), [('api_key', 2), ('c', 3), ('username', 1)]) def test_None_returns_None(self): params =", "requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.RequestException() self.assertRaises(exceptions.HTTPError, lambda: foo()) class PrepareParamsFunctionTests(unittest.TestCase): def test_sort_dict_by_key(self):", "mocker from scieloapi import httpbroker, exceptions import doubles class CheckHttpStatusTests(unittest.TestCase): def test_400_raises_BadRequest(self): response", "from requests.exceptions.HTTPError to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.HTTPError() self.assertRaises(exceptions.HTTPError,", "self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\": \"foo\"}'), 'http://manager.scielo.org/api/v1/journals/4/' ) def test_content_type_is_properly_set(self): \"\"\" Content-Type", "self.assertRaises(exceptions.ConnectionError, lambda: foo()) def test_from_HTTPError_to_HTTPError(self): \"\"\" from requests.exceptions.HTTPError to scieloapi.exceptions.HTTPError \"\"\" import requests", "foo()) def test_from_HTTPError_to_HTTPError(self): \"\"\" from requests.exceptions.HTTPError to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def", "requests.exceptions.HTTPError to scieloapi.exceptions.HTTPError \"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.HTTPError() 
self.assertRaises(exceptions.HTTPError, lambda:", "data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post) self.mocker.replay() self.assertEqual( httpbroker.post('http://manager.scielo.org/api/v1/', endpoint='journals', data='{\"title\":", "httpbroker.check_http_status(response)) def test_404_raises_NotFound(self): response = doubles.RequestsResponseStub() response.status_code = 404 self.assertRaises(exceptions.NotFound, lambda: httpbroker.check_http_status(response)) def", "e.g. scieloapi/0.4 \"\"\" import requests mock_response = self.mocker.mock(requests.Response) mock_response.json() self.mocker.result({'title': 'foo'}) mock_response.status_code self.mocker.result(200)", ") def test_resource_id_makes_endpoint_mandatory(self): self.assertRaises( ValueError, lambda: httpbroker.get('http://manager.scielo.org/api/v1/', resource_id='70') ) def test_https_turns_off_ca_cert_verification(self): import requests", "self.mocker.result(200) mock_requests_get = self.mocker.mock() mock_requests_get('https://manager.scielo.org/api/v1/journals/70/', headers=mocker.ANY, params=None, verify=False) self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.get", "def test_500_raises_InternalServerError(self): response = doubles.RequestsResponseStub() response.status_code = 500 self.assertRaises(exceptions.InternalServerError, lambda: httpbroker.check_http_status(response)) def test_502_raises_BadGateway(self):", "header must be application/json \"\"\" import requests mock_response = self.mocker.mock(requests.Response) mock_response.headers self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'})", "self.assertRaises(exceptions.HTTPError, lambda: foo()) def test_from_RequestException_to_HTTPError(self): \"\"\" from requests.exceptions.RequestException to scieloapi.exceptions.HTTPError \"\"\" import requests", 
"self.mocker.result(201) self.mocker.count(2) mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.MATCH(lambda x: x['Content-Type'] == 'application/json'), data='{\"title\": \"foo\"}')", "= None self.assertIsNone(httpbroker.prepare_params(params)) class GetFunctionTests(mocker.MockerTestCase): def test_user_agent_is_properly_set(self): \"\"\" By properly I mean: scieloapi/:version,", "\"\"\" import requests @httpbroker.translate_exceptions def foo(): raise requests.exceptions.TooManyRedirects() self.assertRaises(exceptions.HTTPError, lambda: foo()) def test_from_RequestException_to_HTTPError(self):", "{'title': 'foo'} ) def test_resource_id_makes_endpoint_mandatory(self): self.assertRaises( ValueError, lambda: httpbroker.get('http://manager.scielo.org/api/v1/', resource_id='70') ) def test_https_turns_off_ca_cert_verification(self):", "mock_requests_post = self.mocker.mock() mock_requests_post(url='http://manager.scielo.org/api/v1/journals/', headers=mocker.ANY, data='{\"title\": \"foo\"}') self.mocker.result(mock_response) mock_requests = self.mocker.replace('requests') mock_requests.post self.mocker.result(mock_requests_post)" ]
[ "halos, indices = modify_halos_and_indices(mesh, indices, halos, bounds) self.masks_without_halos[idx] = create_mask_from_indices(indices) mask = create_mask_from_indices(indices,", "Field. :param bounds: 2-Tuple of Tuple, bounds of the partition inside the mesh.", "dict() self.domain_numbers = dict() self.masks_without_halos = dict() p = [idx for idx in", "if do_create_domain(mesh, create_mask_from_indices(indices), bounds): if bounds is not None: halos, indices = modify_halos_and_indices(mesh,", "self.bounds = self.mesh.bounds for idx in ndindex(self.partitions): indices = calc_local_indices(mesh.shape, self.partitions, idx) halos", "not None: self.bounds = intersect_bounds(mesh.bounds, bounds) else: self.bounds = self.mesh.bounds for idx in", "= mesh self.partitions = partitions self.stencil = stencil # TODO: @property protect self.domains", "self.mesh.bounds for idx in ndindex(self.partitions): indices = calc_local_indices(mesh.shape, self.partitions, idx) halos = calculate_adjacency(self.partitions,", "intersect_bounds(mesh.bounds, bounds) else: self.bounds = self.mesh.bounds for idx in ndindex(self.partitions): indices = calc_local_indices(mesh.shape,", "import print_function __author__ = 'christoph.statz <at> tu-dresden.de' from numpy import ndindex from maui.backend.helper", "maui.backend.domain import Domain from maui.mesh.helper import intersect_bounds # TODO: Partition prototype is needed", "import division from __future__ import absolute_import from __future__ import print_function __author__ = 'christoph.statz", "of Tuple, stencil/footprint of the communication/halos for a multi-domain Field. 
:param bounds: 2-Tuple", "idx in ndindex(partitions)] for k in range(len(p)): self.domain_numbers[p[k]] = k if bounds is", "bounds is not None: halos, indices = modify_halos_and_indices(mesh, indices, halos, bounds) self.masks_without_halos[idx] =", "= calc_local_indices(mesh.shape, self.partitions, idx) halos = calculate_adjacency(self.partitions, idx, stencil) if do_create_domain(mesh, create_mask_from_indices(indices), bounds):", "None: stencil = self.stencil if bounds is None: bounds = self.bounds else: bounds", "properties as properties self.parent = self self.mesh = mesh self.partitions = partitions self.stencil", "numpy import ndindex from maui.backend.helper import calc_local_indices, calculate_adjacency, \\ create_mask_from_indices, do_create_domain, modify_halos_and_indices from", "maui.backend.helper import calc_local_indices, calculate_adjacency, \\ create_mask_from_indices, do_create_domain, modify_halos_and_indices from maui.backend.domain import Domain from", "__future__ import division from __future__ import absolute_import from __future__ import print_function __author__ =", "ndindex(partitions)] for k in range(len(p)): self.domain_numbers[p[k]] = k if bounds is not None:", "idx) halos = calculate_adjacency(self.partitions, idx, stencil) if do_create_domain(mesh, create_mask_from_indices(indices), bounds): if bounds is", "stencil=None, shift=None): if stencil is None: stencil = self.stencil if bounds is None:", "ndindex from maui.backend.helper import calc_local_indices, calculate_adjacency, \\ create_mask_from_indices, do_create_domain, modify_halos_and_indices from maui.backend.domain import", "= dict() p = [idx for idx in ndindex(partitions)] for k in range(len(p)):", "that is to be partitioned. 
:param partitions: Tuple, number of partitions in each", "in ndindex(partitions)] for k in range(len(p)): self.domain_numbers[p[k]] = k if bounds is not", "self.domain_numbers[p[k]] = k if bounds is not None: self.bounds = intersect_bounds(mesh.bounds, bounds) else:", "calculate_adjacency(self.partitions, idx, stencil) if do_create_domain(mesh, create_mask_from_indices(indices), bounds): if bounds is not None: halos,", "= self.mesh.copy() if shift is not None: mesh.shift(shift) p = Partition(mesh, self.partitions, stencil,", "mesh, partitions, stencil, bounds=None): \"\"\" Mesh partitioning. :param mesh: Mesh, the mesh instance", "is to be partitioned. :param partitions: Tuple, number of partitions in each dimension", ":param mesh: Mesh, the mesh instance that is to be partitioned. :param partitions:", "prototype is needed class Partition(object): def __init__(self, mesh, partitions, stencil, bounds=None): \"\"\" Mesh", "bounds: 2-Tuple of Tuple, bounds of the partition inside the mesh. \"\"\" #", "mask, halos) @property def meta_data(self): return {key: self.domains[key].mask for key in self.domains} def", "self.partitions, idx) halos = calculate_adjacency(self.partitions, idx, stencil) if do_create_domain(mesh, create_mask_from_indices(indices), bounds): if bounds", "range(len(p)): self.domain_numbers[p[k]] = k if bounds is not None: self.bounds = intersect_bounds(mesh.bounds, bounds)", "bounds of the partition inside the mesh. \"\"\" # todo: implement properties as", "bounds=None, stencil=None, shift=None): if stencil is None: stencil = self.stencil if bounds is", "\"\"\" Mesh partitioning. 
:param mesh: Mesh, the mesh instance that is to be", "\"\"\" # todo: implement properties as properties self.parent = self self.mesh = mesh", "-*- from __future__ import division from __future__ import absolute_import from __future__ import print_function", "def copy(self, bounds=None, stencil=None, shift=None): if stencil is None: stencil = self.stencil if", "shift is not None: mesh.shift(shift) p = Partition(mesh, self.partitions, stencil, bounds) p.parent =", "= create_mask_from_indices(indices) mask = create_mask_from_indices(indices, halos) self.domains[idx] = Domain(idx, mesh, mask, halos) @property", "print_function __author__ = 'christoph.statz <at> tu-dresden.de' from numpy import ndindex from maui.backend.helper import", "mesh instance that is to be partitioned. :param partitions: Tuple, number of partitions", "Tuple, number of partitions in each dimension of the mesh. :param stencil: 2-Tuple", "do_create_domain(mesh, create_mask_from_indices(indices), bounds): if bounds is not None: halos, indices = modify_halos_and_indices(mesh, indices,", "None: bounds = self.bounds else: bounds = intersect_bounds(self.bounds, bounds) mesh = self.mesh.copy() if", "None: halos, indices = modify_halos_and_indices(mesh, indices, halos, bounds) self.masks_without_halos[idx] = create_mask_from_indices(indices) mask =", "Domain from maui.mesh.helper import intersect_bounds # TODO: Partition prototype is needed class Partition(object):", "else: self.bounds = self.mesh.bounds for idx in ndindex(self.partitions): indices = calc_local_indices(mesh.shape, self.partitions, idx)", "= self.bounds else: bounds = intersect_bounds(self.bounds, bounds) mesh = self.mesh.copy() if shift is", "utf-8 -*- from __future__ import division from __future__ import absolute_import from __future__ import", "self.masks_without_halos = dict() p = [idx for idx in ndindex(partitions)] for k in", "partitioned. 
:param partitions: Tuple, number of partitions in each dimension of the mesh.", "indices = modify_halos_and_indices(mesh, indices, halos, bounds) self.masks_without_halos[idx] = create_mask_from_indices(indices) mask = create_mask_from_indices(indices, halos)", "halos) @property def meta_data(self): return {key: self.domains[key].mask for key in self.domains} def copy(self,", "is not None: self.bounds = intersect_bounds(mesh.bounds, bounds) else: self.bounds = self.mesh.bounds for idx", "todo: implement properties as properties self.parent = self self.mesh = mesh self.partitions =", "from numpy import ndindex from maui.backend.helper import calc_local_indices, calculate_adjacency, \\ create_mask_from_indices, do_create_domain, modify_halos_and_indices", "multi-domain Field. :param bounds: 2-Tuple of Tuple, bounds of the partition inside the", "is None: stencil = self.stencil if bounds is None: bounds = self.bounds else:", "intersect_bounds(self.bounds, bounds) mesh = self.mesh.copy() if shift is not None: mesh.shift(shift) p =", "stencil is None: stencil = self.stencil if bounds is None: bounds = self.bounds", "# TODO: @property protect self.domains = dict() self.domain_numbers = dict() self.masks_without_halos = dict()", "dict() self.masks_without_halos = dict() p = [idx for idx in ndindex(partitions)] for k", "def meta_data(self): return {key: self.domains[key].mask for key in self.domains} def copy(self, bounds=None, stencil=None,", "None: mesh.shift(shift) p = Partition(mesh, self.partitions, stencil, bounds) p.parent = self return p", "Tuple, bounds of the partition inside the mesh. \"\"\" # todo: implement properties", "else: bounds = intersect_bounds(self.bounds, bounds) mesh = self.mesh.copy() if shift is not None:", "is needed class Partition(object): def __init__(self, mesh, partitions, stencil, bounds=None): \"\"\" Mesh partitioning.", "to be partitioned. 
:param partitions: Tuple, number of partitions in each dimension of", "__future__ import print_function __author__ = 'christoph.statz <at> tu-dresden.de' from numpy import ndindex from", "indices, halos, bounds) self.masks_without_halos[idx] = create_mask_from_indices(indices) mask = create_mask_from_indices(indices, halos) self.domains[idx] = Domain(idx,", "if stencil is None: stencil = self.stencil if bounds is None: bounds =", "idx, stencil) if do_create_domain(mesh, create_mask_from_indices(indices), bounds): if bounds is not None: halos, indices", "= k if bounds is not None: self.bounds = intersect_bounds(mesh.bounds, bounds) else: self.bounds", "bounds is None: bounds = self.bounds else: bounds = intersect_bounds(self.bounds, bounds) mesh =", "Mesh partitioning. :param mesh: Mesh, the mesh instance that is to be partitioned.", "from maui.backend.helper import calc_local_indices, calculate_adjacency, \\ create_mask_from_indices, do_create_domain, modify_halos_and_indices from maui.backend.domain import Domain", "inside the mesh. \"\"\" # todo: implement properties as properties self.parent = self", "for idx in ndindex(self.partitions): indices = calc_local_indices(mesh.shape, self.partitions, idx) halos = calculate_adjacency(self.partitions, idx,", "of the partition inside the mesh. \"\"\" # todo: implement properties as properties", "bounds) self.masks_without_halos[idx] = create_mask_from_indices(indices) mask = create_mask_from_indices(indices, halos) self.domains[idx] = Domain(idx, mesh, mask,", "Mesh, the mesh instance that is to be partitioned. :param partitions: Tuple, number", "for k in range(len(p)): self.domain_numbers[p[k]] = k if bounds is not None: self.bounds", "= 'christoph.statz <at> tu-dresden.de' from numpy import ndindex from maui.backend.helper import calc_local_indices, calculate_adjacency,", "stencil/footprint of the communication/halos for a multi-domain Field. 
:param bounds: 2-Tuple of Tuple,", "Domain(idx, mesh, mask, halos) @property def meta_data(self): return {key: self.domains[key].mask for key in", "meta_data(self): return {key: self.domains[key].mask for key in self.domains} def copy(self, bounds=None, stencil=None, shift=None):", "as properties self.parent = self self.mesh = mesh self.partitions = partitions self.stencil =", "self.mesh = mesh self.partitions = partitions self.stencil = stencil # TODO: @property protect", "not None: mesh.shift(shift) p = Partition(mesh, self.partitions, stencil, bounds) p.parent = self return", "in range(len(p)): self.domain_numbers[p[k]] = k if bounds is not None: self.bounds = intersect_bounds(mesh.bounds,", "self.domains[idx] = Domain(idx, mesh, mask, halos) @property def meta_data(self): return {key: self.domains[key].mask for", "copy(self, bounds=None, stencil=None, shift=None): if stencil is None: stencil = self.stencil if bounds", "ndindex(self.partitions): indices = calc_local_indices(mesh.shape, self.partitions, idx) halos = calculate_adjacency(self.partitions, idx, stencil) if do_create_domain(mesh,", "create_mask_from_indices(indices) mask = create_mask_from_indices(indices, halos) self.domains[idx] = Domain(idx, mesh, mask, halos) @property def", "self.stencil = stencil # TODO: @property protect self.domains = dict() self.domain_numbers = dict()", "halos) self.domains[idx] = Domain(idx, mesh, mask, halos) @property def meta_data(self): return {key: self.domains[key].mask", "bounds = self.bounds else: bounds = intersect_bounds(self.bounds, bounds) mesh = self.mesh.copy() if shift", "__future__ import absolute_import from __future__ import print_function __author__ = 'christoph.statz <at> tu-dresden.de' from", "# -*- coding: utf-8 -*- from __future__ import division from __future__ import absolute_import", "the communication/halos for a multi-domain Field. 
:param bounds: 2-Tuple of Tuple, bounds of", "from __future__ import absolute_import from __future__ import print_function __author__ = 'christoph.statz <at> tu-dresden.de'", "from maui.backend.domain import Domain from maui.mesh.helper import intersect_bounds # TODO: Partition prototype is", "if bounds is not None: self.bounds = intersect_bounds(mesh.bounds, bounds) else: self.bounds = self.mesh.bounds", "\\ create_mask_from_indices, do_create_domain, modify_halos_and_indices from maui.backend.domain import Domain from maui.mesh.helper import intersect_bounds #", "k if bounds is not None: self.bounds = intersect_bounds(mesh.bounds, bounds) else: self.bounds =", "Tuple, stencil/footprint of the communication/halos for a multi-domain Field. :param bounds: 2-Tuple of", "__init__(self, mesh, partitions, stencil, bounds=None): \"\"\" Mesh partitioning. :param mesh: Mesh, the mesh", "the partition inside the mesh. \"\"\" # todo: implement properties as properties self.parent", "partitioning. :param mesh: Mesh, the mesh instance that is to be partitioned. :param", "the mesh. \"\"\" # todo: implement properties as properties self.parent = self self.mesh", "stencil: 2-Tuple of Tuple, stencil/footprint of the communication/halos for a multi-domain Field. :param", "stencil # TODO: @property protect self.domains = dict() self.domain_numbers = dict() self.masks_without_halos =", "bounds=None): \"\"\" Mesh partitioning. 
:param mesh: Mesh, the mesh instance that is to", "tu-dresden.de' from numpy import ndindex from maui.backend.helper import calc_local_indices, calculate_adjacency, \\ create_mask_from_indices, do_create_domain,", "create_mask_from_indices(indices), bounds): if bounds is not None: halos, indices = modify_halos_and_indices(mesh, indices, halos,", "if bounds is not None: halos, indices = modify_halos_and_indices(mesh, indices, halos, bounds) self.masks_without_halos[idx]", "mesh, mask, halos) @property def meta_data(self): return {key: self.domains[key].mask for key in self.domains}", "in self.domains} def copy(self, bounds=None, stencil=None, shift=None): if stencil is None: stencil =", "if shift is not None: mesh.shift(shift) p = Partition(mesh, self.partitions, stencil, bounds) p.parent", "for a multi-domain Field. :param bounds: 2-Tuple of Tuple, bounds of the partition", "in ndindex(self.partitions): indices = calc_local_indices(mesh.shape, self.partitions, idx) halos = calculate_adjacency(self.partitions, idx, stencil) if", "k in range(len(p)): self.domain_numbers[p[k]] = k if bounds is not None: self.bounds =", "bounds) mesh = self.mesh.copy() if shift is not None: mesh.shift(shift) p = Partition(mesh,", "from __future__ import division from __future__ import absolute_import from __future__ import print_function __author__", "dimension of the mesh. 
:param stencil: 2-Tuple of Tuple, stencil/footprint of the communication/halos", "-*- coding: utf-8 -*- from __future__ import division from __future__ import absolute_import from", "is None: bounds = self.bounds else: bounds = intersect_bounds(self.bounds, bounds) mesh = self.mesh.copy()", "implement properties as properties self.parent = self self.mesh = mesh self.partitions = partitions", "= intersect_bounds(self.bounds, bounds) mesh = self.mesh.copy() if shift is not None: mesh.shift(shift) p", "self.stencil if bounds is None: bounds = self.bounds else: bounds = intersect_bounds(self.bounds, bounds)", "p = [idx for idx in ndindex(partitions)] for k in range(len(p)): self.domain_numbers[p[k]] =", "maui.mesh.helper import intersect_bounds # TODO: Partition prototype is needed class Partition(object): def __init__(self,", "for idx in ndindex(partitions)] for k in range(len(p)): self.domain_numbers[p[k]] = k if bounds", "@property def meta_data(self): return {key: self.domains[key].mask for key in self.domains} def copy(self, bounds=None,", "self.masks_without_halos[idx] = create_mask_from_indices(indices) mask = create_mask_from_indices(indices, halos) self.domains[idx] = Domain(idx, mesh, mask, halos)", "instance that is to be partitioned. :param partitions: Tuple, number of partitions in", "# todo: implement properties as properties self.parent = self self.mesh = mesh self.partitions", "self.domain_numbers = dict() self.masks_without_halos = dict() p = [idx for idx in ndindex(partitions)]", "calculate_adjacency, \\ create_mask_from_indices, do_create_domain, modify_halos_and_indices from maui.backend.domain import Domain from maui.mesh.helper import intersect_bounds", "not None: halos, indices = modify_halos_and_indices(mesh, indices, halos, bounds) self.masks_without_halos[idx] = create_mask_from_indices(indices) mask", "the mesh instance that is to be partitioned. 
:param partitions: Tuple, number of", "TODO: Partition prototype is needed class Partition(object): def __init__(self, mesh, partitions, stencil, bounds=None):", "= self self.mesh = mesh self.partitions = partitions self.stencil = stencil # TODO:", "= partitions self.stencil = stencil # TODO: @property protect self.domains = dict() self.domain_numbers", "mask = create_mask_from_indices(indices, halos) self.domains[idx] = Domain(idx, mesh, mask, halos) @property def meta_data(self):", "= create_mask_from_indices(indices, halos) self.domains[idx] = Domain(idx, mesh, mask, halos) @property def meta_data(self): return", ":param bounds: 2-Tuple of Tuple, bounds of the partition inside the mesh. \"\"\"", "self.domains[key].mask for key in self.domains} def copy(self, bounds=None, stencil=None, shift=None): if stencil is", "<reponame>cstatz/maui<filename>maui/backend/serial/partition.py # -*- coding: utf-8 -*- from __future__ import division from __future__ import", "the mesh. :param stencil: 2-Tuple of Tuple, stencil/footprint of the communication/halos for a", "self.partitions = partitions self.stencil = stencil # TODO: @property protect self.domains = dict()", "each dimension of the mesh. :param stencil: 2-Tuple of Tuple, stencil/footprint of the", "mesh: Mesh, the mesh instance that is to be partitioned. :param partitions: Tuple,", "is not None: mesh.shift(shift) p = Partition(mesh, self.partitions, stencil, bounds) p.parent = self", "dict() p = [idx for idx in ndindex(partitions)] for k in range(len(p)): self.domain_numbers[p[k]]", "communication/halos for a multi-domain Field. :param bounds: 2-Tuple of Tuple, bounds of the", "mesh self.partitions = partitions self.stencil = stencil # TODO: @property protect self.domains =", "partition inside the mesh. 
\"\"\" # todo: implement properties as properties self.parent =", "self.domains = dict() self.domain_numbers = dict() self.masks_without_halos = dict() p = [idx for", "= stencil # TODO: @property protect self.domains = dict() self.domain_numbers = dict() self.masks_without_halos", "create_mask_from_indices(indices, halos) self.domains[idx] = Domain(idx, mesh, mask, halos) @property def meta_data(self): return {key:", "of partitions in each dimension of the mesh. :param stencil: 2-Tuple of Tuple,", "= self.stencil if bounds is None: bounds = self.bounds else: bounds = intersect_bounds(self.bounds,", "shift=None): if stencil is None: stencil = self.stencil if bounds is None: bounds", "def __init__(self, mesh, partitions, stencil, bounds=None): \"\"\" Mesh partitioning. :param mesh: Mesh, the", "key in self.domains} def copy(self, bounds=None, stencil=None, shift=None): if stencil is None: stencil", "mesh. :param stencil: 2-Tuple of Tuple, stencil/footprint of the communication/halos for a multi-domain", "2-Tuple of Tuple, bounds of the partition inside the mesh. \"\"\" # todo:", "of the mesh. :param stencil: 2-Tuple of Tuple, stencil/footprint of the communication/halos for", "bounds): if bounds is not None: halos, indices = modify_halos_and_indices(mesh, indices, halos, bounds)", "of Tuple, bounds of the partition inside the mesh. \"\"\" # todo: implement", "= self.mesh.bounds for idx in ndindex(self.partitions): indices = calc_local_indices(mesh.shape, self.partitions, idx) halos =", "mesh = self.mesh.copy() if shift is not None: mesh.shift(shift) p = Partition(mesh, self.partitions,", "stencil = self.stencil if bounds is None: bounds = self.bounds else: bounds =", "from maui.mesh.helper import intersect_bounds # TODO: Partition prototype is needed class Partition(object): def", "mesh. 
\"\"\" # todo: implement properties as properties self.parent = self self.mesh =", "bounds = intersect_bounds(self.bounds, bounds) mesh = self.mesh.copy() if shift is not None: mesh.shift(shift)", "= dict() self.masks_without_halos = dict() p = [idx for idx in ndindex(partitions)] for", "a multi-domain Field. :param bounds: 2-Tuple of Tuple, bounds of the partition inside", "modify_halos_and_indices(mesh, indices, halos, bounds) self.masks_without_halos[idx] = create_mask_from_indices(indices) mask = create_mask_from_indices(indices, halos) self.domains[idx] =", "<at> tu-dresden.de' from numpy import ndindex from maui.backend.helper import calc_local_indices, calculate_adjacency, \\ create_mask_from_indices,", "'christoph.statz <at> tu-dresden.de' from numpy import ndindex from maui.backend.helper import calc_local_indices, calculate_adjacency, \\", "bounds is not None: self.bounds = intersect_bounds(mesh.bounds, bounds) else: self.bounds = self.mesh.bounds for", "import intersect_bounds # TODO: Partition prototype is needed class Partition(object): def __init__(self, mesh,", "partitions self.stencil = stencil # TODO: @property protect self.domains = dict() self.domain_numbers =", "None: self.bounds = intersect_bounds(mesh.bounds, bounds) else: self.bounds = self.mesh.bounds for idx in ndindex(self.partitions):", "is not None: halos, indices = modify_halos_and_indices(mesh, indices, halos, bounds) self.masks_without_halos[idx] = create_mask_from_indices(indices)", ":param partitions: Tuple, number of partitions in each dimension of the mesh. 
:param", ":param stencil: 2-Tuple of Tuple, stencil/footprint of the communication/halos for a multi-domain Field.", "__author__ = 'christoph.statz <at> tu-dresden.de' from numpy import ndindex from maui.backend.helper import calc_local_indices,", "from __future__ import print_function __author__ = 'christoph.statz <at> tu-dresden.de' from numpy import ndindex", "create_mask_from_indices, do_create_domain, modify_halos_and_indices from maui.backend.domain import Domain from maui.mesh.helper import intersect_bounds # TODO:", "[idx for idx in ndindex(partitions)] for k in range(len(p)): self.domain_numbers[p[k]] = k if", "be partitioned. :param partitions: Tuple, number of partitions in each dimension of the", "self.bounds = intersect_bounds(mesh.bounds, bounds) else: self.bounds = self.mesh.bounds for idx in ndindex(self.partitions): indices", "Partition prototype is needed class Partition(object): def __init__(self, mesh, partitions, stencil, bounds=None): \"\"\"", "2-Tuple of Tuple, stencil/footprint of the communication/halos for a multi-domain Field. 
:param bounds:", "idx in ndindex(self.partitions): indices = calc_local_indices(mesh.shape, self.partitions, idx) halos = calculate_adjacency(self.partitions, idx, stencil)", "= calculate_adjacency(self.partitions, idx, stencil) if do_create_domain(mesh, create_mask_from_indices(indices), bounds): if bounds is not None:", "calc_local_indices(mesh.shape, self.partitions, idx) halos = calculate_adjacency(self.partitions, idx, stencil) if do_create_domain(mesh, create_mask_from_indices(indices), bounds): if", "import ndindex from maui.backend.helper import calc_local_indices, calculate_adjacency, \\ create_mask_from_indices, do_create_domain, modify_halos_and_indices from maui.backend.domain", "TODO: @property protect self.domains = dict() self.domain_numbers = dict() self.masks_without_halos = dict() p", "return {key: self.domains[key].mask for key in self.domains} def copy(self, bounds=None, stencil=None, shift=None): if", "partitions in each dimension of the mesh. :param stencil: 2-Tuple of Tuple, stencil/footprint", "import Domain from maui.mesh.helper import intersect_bounds # TODO: Partition prototype is needed class", "self.domains} def copy(self, bounds=None, stencil=None, shift=None): if stencil is None: stencil = self.stencil", "= intersect_bounds(mesh.bounds, bounds) else: self.bounds = self.mesh.bounds for idx in ndindex(self.partitions): indices =", "protect self.domains = dict() self.domain_numbers = dict() self.masks_without_halos = dict() p = [idx", "= modify_halos_and_indices(mesh, indices, halos, bounds) self.masks_without_halos[idx] = create_mask_from_indices(indices) mask = create_mask_from_indices(indices, halos) self.domains[idx]", "= Domain(idx, mesh, mask, halos) @property def meta_data(self): return {key: self.domains[key].mask for key", "stencil) if do_create_domain(mesh, create_mask_from_indices(indices), bounds): if bounds is not None: halos, indices =", "calc_local_indices, calculate_adjacency, \\ create_mask_from_indices, 
do_create_domain, modify_halos_and_indices from maui.backend.domain import Domain from maui.mesh.helper import", "coding: utf-8 -*- from __future__ import division from __future__ import absolute_import from __future__", "import absolute_import from __future__ import print_function __author__ = 'christoph.statz <at> tu-dresden.de' from numpy", "self self.mesh = mesh self.partitions = partitions self.stencil = stencil # TODO: @property", "in each dimension of the mesh. :param stencil: 2-Tuple of Tuple, stencil/footprint of", "modify_halos_and_indices from maui.backend.domain import Domain from maui.mesh.helper import intersect_bounds # TODO: Partition prototype", "halos = calculate_adjacency(self.partitions, idx, stencil) if do_create_domain(mesh, create_mask_from_indices(indices), bounds): if bounds is not", "Partition(object): def __init__(self, mesh, partitions, stencil, bounds=None): \"\"\" Mesh partitioning. :param mesh: Mesh,", "if bounds is None: bounds = self.bounds else: bounds = intersect_bounds(self.bounds, bounds) mesh", "= dict() self.domain_numbers = dict() self.masks_without_halos = dict() p = [idx for idx", "for key in self.domains} def copy(self, bounds=None, stencil=None, shift=None): if stencil is None:", "partitions: Tuple, number of partitions in each dimension of the mesh. :param stencil:", "import calc_local_indices, calculate_adjacency, \\ create_mask_from_indices, do_create_domain, modify_halos_and_indices from maui.backend.domain import Domain from maui.mesh.helper", "of the communication/halos for a multi-domain Field. :param bounds: 2-Tuple of Tuple, bounds", "self.mesh.copy() if shift is not None: mesh.shift(shift) p = Partition(mesh, self.partitions, stencil, bounds)", "stencil, bounds=None): \"\"\" Mesh partitioning. :param mesh: Mesh, the mesh instance that is", "= [idx for idx in ndindex(partitions)] for k in range(len(p)): self.domain_numbers[p[k]] = k", "partitions, stencil, bounds=None): \"\"\" Mesh partitioning. 
:param mesh: Mesh, the mesh instance that", "bounds) else: self.bounds = self.mesh.bounds for idx in ndindex(self.partitions): indices = calc_local_indices(mesh.shape, self.partitions,", "{key: self.domains[key].mask for key in self.domains} def copy(self, bounds=None, stencil=None, shift=None): if stencil", "self.parent = self self.mesh = mesh self.partitions = partitions self.stencil = stencil #", "absolute_import from __future__ import print_function __author__ = 'christoph.statz <at> tu-dresden.de' from numpy import", "properties self.parent = self self.mesh = mesh self.partitions = partitions self.stencil = stencil", "indices = calc_local_indices(mesh.shape, self.partitions, idx) halos = calculate_adjacency(self.partitions, idx, stencil) if do_create_domain(mesh, create_mask_from_indices(indices),", "number of partitions in each dimension of the mesh. :param stencil: 2-Tuple of", "self.bounds else: bounds = intersect_bounds(self.bounds, bounds) mesh = self.mesh.copy() if shift is not", "division from __future__ import absolute_import from __future__ import print_function __author__ = 'christoph.statz <at>", "intersect_bounds # TODO: Partition prototype is needed class Partition(object): def __init__(self, mesh, partitions,", "class Partition(object): def __init__(self, mesh, partitions, stencil, bounds=None): \"\"\" Mesh partitioning. :param mesh:", "halos, bounds) self.masks_without_halos[idx] = create_mask_from_indices(indices) mask = create_mask_from_indices(indices, halos) self.domains[idx] = Domain(idx, mesh,", "do_create_domain, modify_halos_and_indices from maui.backend.domain import Domain from maui.mesh.helper import intersect_bounds # TODO: Partition", "needed class Partition(object): def __init__(self, mesh, partitions, stencil, bounds=None): \"\"\" Mesh partitioning. 
:param", "@property protect self.domains = dict() self.domain_numbers = dict() self.masks_without_halos = dict() p =", "# TODO: Partition prototype is needed class Partition(object): def __init__(self, mesh, partitions, stencil," ]
[ "Numeral Symbols'), (69376, 69423, 'Old Sogdian'), (69424, 69487, 'Sogdian'), (69600, 69631, 'Elymaic'), (69632,", "(6016, 6143, 'Khmer'), (6144, 6319, 'Mongolian'), (6320, 6399, 'Unified Canadian Aboriginal Syllabics Extended'),", "Symbols-A'), (10224, 10239, 'Supplemental Arrows-A'), (10240, 10495, 'Braille Patterns'), (10496, 10623, 'Supplemental Arrows-B'),", "(55216, 55295, 'Hangul Jamo Extended-B'), (55296, 56191, 'High Surrogates'), (56192, 56319, 'High Private", "Symbols-B'), (10752, 11007, 'Supplemental Mathematical Operators'), (11008, 11263, 'Miscellaneous Symbols and Arrows'), (11264,", "Months'), (13056, 13311, 'CJK Compatibility'), (13312, 19903, 'CJK Unified Ideographs Extension A'), (19904,", "(43488, 43519, 'Myanmar Extended-B'), (43520, 43615, 'Cham'), (43616, 43647, 'Myanmar Extended-A'), (43648, 43743,", "Canadian Aboriginal Syllabics'), (5760, 5791, 'Ogham'), (5792, 5887, 'Runic'), (5888, 5919, 'Tagalog'), (5920,", "Format Controls'), (82944, 83583, 'Anatolian Hieroglyphs'), (92160, 92735, 'Bamum Supplement'), (92736, 92783, 'Mro'),", "'Cyrillic Extended-A'), (11776, 11903, 'Supplemental Punctuation'), (11904, 12031, 'CJK Radicals Supplement'), (12032, 12255,", "(65520, 65535, 'Specials'), (65536, 65663, 'Linear B Syllabary'), (65664, 65791, 'Linear B Ideograms'),", "'Grantha'), (70656, 70783, 'Newa'), (70784, 70879, 'Tirhuta'), (71040, 71167, 'Siddham'), (71168, 71263, 'Modi'),", "'Mandaic'), (2144, 2159, 'Syriac Supplement'), (2208, 2303, 'Arabic Extended-A'), (2304, 2431, 'Devanagari'), (2432,", "Unified Ideographs Extension C'), (177984, 178207, 'CJK Unified Ideographs Extension D'), (178208, 183983,", "Extended'), (8192, 8303, 'General Punctuation'), (8304, 8351, 'Superscripts and Subscripts'), (8352, 8399, 'Currency", "(6656, 6687, 'Buginese'), (6688, 6831, 'Tai Tham'), (6832, 6911, 'Combining Diacritical Marks Extended'),", "'CJK Strokes'), (12784, 12799, 'Katakana Phonetic Extensions'), (12800, 13055, 'Enclosed CJK Letters and", 
"(70320, 70399, 'Khudawadi'), (70400, 70527, 'Grantha'), (70656, 70783, 'Newa'), (70784, 70879, 'Tirhuta'), (71040,", "'Georgian Supplement'), (11568, 11647, 'Tifinagh'), (11648, 11743, 'Ethiopic Extended'), (11744, 11775, 'Cyrillic Extended-A'),", "'Enclosed Alphanumeric Supplement'), (127488, 127743, 'Enclosed Ideographic Supplement'), (127744, 128511, 'Miscellaneous Symbols and", "2559, 'Bengali'), (2560, 2687, 'Gurmukhi'), (2688, 2815, 'Gujarati'), (2816, 2943, 'Oriya'), (2944, 3071,", "Selectors Supplement'), (983040, 1048575, 'Supplementary Private Use Area-A'), (1048576, 1114111, 'Supplementary Private Use", "'Meroitic Cursive'), (68096, 68191, 'Kharoshthi'), (68192, 68223, 'Old South Arabian'), (68224, 68255, 'Old", "'Playing Cards'), (127232, 127487, 'Enclosed Alphanumeric Supplement'), (127488, 127743, 'Enclosed Ideographic Supplement'), (127744,", "12287, 'Ideographic Description Characters'), (12288, 12351, 'CJK Symbols and Punctuation'), (12352, 12447, 'Hiragana'),", "'Mathematical Alphanumeric Symbols'), (120832, 121519, 'Sutton SignWriting'), (122880, 122927, 'Glagolitic Supplement'), (123136, 123215,", "https://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python def block(character): \"\"\" Return the Unicode block name for character, or None", "Jamo'), (12688, 12703, 'Kanbun'), (12704, 12735, 'Bopomofo Extended'), (12736, 12783, 'CJK Strokes'), (12784,", "Epact Numbers'), (66304, 66351, 'Old Italic'), (66352, 66383, 'Gothic'), (66384, 66431, 'Old Permic'),", "67679, 'Imperial Aramaic'), (67680, 67711, 'Palmyrene'), (67712, 67759, 'Nabataean'), (67808, 67839, 'Hatran'), (67840,", "67455, 'Linear A'), (67584, 67647, 'Cypriot Syllabary'), (67648, 67679, 'Imperial Aramaic'), (67680, 67711,", "(71296, 71375, 'Takri'), (71424, 71487, 'Ahom'), (71680, 71759, 'Dogra'), (71840, 71935, '<NAME>'), (72096,", "Tiles'), (127136, 127231, 'Playing Cards'), (127232, 127487, 'Enclosed Alphanumeric Supplement'), (127488, 127743, 'Enclosed", 
"'Latin-1 Supplement'), (256, 383, 'Latin Extended-A'), (384, 591, 'Latin Extended-B'), (592, 687, 'IPA", "6479, 'Limbu'), (6480, 6527, 'Tai Le'), (6528, 6623, 'New Tai Lue'), (6624, 6655,", "65071, 'Combining Half Marks'), (65072, 65103, 'CJK Compatibility Forms'), (65104, 65135, 'Small Form", "Compatibility Jamo'), (12688, 12703, 'Kanbun'), (12704, 12735, 'Bopomofo Extended'), (12736, 12783, 'CJK Strokes'),", "'Glagolitic Supplement'), (123136, 123215, '<NAME>'), (123584, 123647, 'Wancho'), (124928, 125151, '<NAME>'), (125184, 125279,", "68191, 'Kharoshthi'), (68192, 68223, 'Old South Arabian'), (68224, 68255, 'Old North Arabian'), (68288,", "CJK Letters and Months'), (13056, 13311, 'CJK Compatibility'), (13312, 19903, 'CJK Unified Ideographs", "'Telugu'), (3200, 3327, 'Kannada'), (3328, 3455, 'Malayalam'), (3456, 3583, 'Sinhala'), (3584, 3711, 'Thai'),", "Symbols'), (8448, 8527, 'Letterlike Symbols'), (8528, 8591, 'Number Forms'), (8592, 8703, 'Arrows'), (8704,", "3455, 'Malayalam'), (3456, 3583, 'Sinhala'), (3584, 3711, 'Thai'), (3712, 3839, 'Lao'), (3840, 4095,", "if start <= cp <= end: return name blocks = [(0, 127, 'Basic", "(68864, 68927, '<NAME>'), (69216, 69247, 'Rumi Numeral Symbols'), (69376, 69423, 'Old Sogdian'), (69424,", "69247, 'Rumi Numeral Symbols'), (69376, 69423, 'Old Sogdian'), (69424, 69487, 'Sogdian'), (69600, 69631,", "(12352, 12447, 'Hiragana'), (12448, 12543, 'Katakana'), (12544, 12591, 'Bopomofo'), (12592, 12687, 'Hangul Compatibility", "65855, 'Aegean Numbers'), (65856, 65935, 'Ancient Greek Numbers'), (65936, 65999, 'Ancient Symbols'), (66000,", "Pahlavi'), (68480, 68527, '<NAME>'), (68608, 68687, 'Old Turkic'), (68736, 68863, 'Old Hungarian'), (68864,", "Number Forms'), (43072, 43135, 'Phags-pa'), (43136, 43231, 'Saurashtra'), (43232, 43263, 'Devanagari Extended'), (43264,", "(9632, 9727, 'Geometric Shapes'), (9728, 9983, 'Miscellaneous Symbols'), (9984, 10175, 'Dingbats'), (10176, 10223,", "(65056, 65071, 'Combining Half Marks'), 
(65072, 65103, 'CJK Compatibility Forms'), (65104, 65135, 'Small", "66207, 'Lycian'), (66208, 66271, 'Carian'), (66272, 66303, 'Coptic Epact Numbers'), (66304, 66351, 'Old", "125279, 'Adlam'), (126064, 126143, 'Indic Siyaq Numbers'), (126208, 126287, 'Ottoman Siyaq Numbers'), (126464,", "'Latin Extended-B'), (592, 687, 'IPA Extensions'), (688, 767, 'Spacing Modifier Letters'), (768, 879,", "[(0, 127, 'Basic Latin'), (128, 255, 'Latin-1 Supplement'), (256, 383, 'Latin Extended-A'), (384,", "11743, 'Ethiopic Extended'), (11744, 11775, 'Cyrillic Extended-A'), (11776, 11903, 'Supplemental Punctuation'), (11904, 12031,", "'Counting Rod Numerals'), (119808, 120831, 'Mathematical Alphanumeric Symbols'), (120832, 121519, 'Sutton SignWriting'), (122880,", "Letters'), (768, 879, 'Combining Diacritical Marks'), (880, 1023, 'Greek and Coptic'), (1024, 1279,", "(67648, 67679, 'Imperial Aramaic'), (67680, 67711, 'Palmyrene'), (67712, 67759, 'Nabataean'), (67808, 67839, 'Hatran'),", "(128640, 128767, 'Transport and Map Symbols'), (128768, 128895, 'Alchemical Symbols'), (128896, 129023, 'Geometric", "(1024, 1279, 'Cyrillic'), (1280, 1327, 'Cyrillic Supplement'), (1328, 1423, 'Armenian'), (1424, 1535, 'Hebrew'),", "67903, 'Lydian'), (67968, 67999, 'Meroitic Hieroglyphs'), (68000, 68095, 'Meroitic Cursive'), (68096, 68191, 'Kharoshthi'),", "6911, 'Combining Diacritical Marks Extended'), (6912, 7039, 'Balinese'), (7040, 7103, 'Sundanese'), (7104, 7167,", "(13312, 19903, 'CJK Unified Ideographs Extension A'), (19904, 19967, 'Yijing Hexagram Symbols'), (19968,", "(66000, 66047, 'Phaistos Disc'), (66176, 66207, 'Lycian'), (66208, 66271, 'Carian'), (66272, 66303, 'Coptic", "Symbols'), (19968, 40959, 'CJK Unified Ideographs'), (40960, 42127, 'Yi Syllables'), (42128, 42191, 'Yi", "255, 'Latin-1 Supplement'), (256, 383, 'Latin Extended-A'), (384, 591, 'Latin Extended-B'), (592, 687,", "'Balinese'), (7040, 7103, 'Sundanese'), (7104, 7167, 'Batak'), (7168, 7247, 'Lepcha'), (7248, 7295, 'Ol", 
"'Cuneiform Numbers and Punctuation'), (74880, 75087, 'Early Dynastic Cuneiform'), (77824, 78895, 'Egyptian Hieroglyphs'),", "7679, 'Combining Diacritical Marks Supplement'), (7680, 7935, 'Latin Extended Additional'), (7936, 8191, 'Greek", "Tham'), (6832, 6911, 'Combining Diacritical Marks Extended'), (6912, 7039, 'Balinese'), (7040, 7103, 'Sundanese'),", "(12288, 12351, 'CJK Symbols and Punctuation'), (12352, 12447, 'Hiragana'), (12448, 12543, 'Katakana'), (12544,", "Cuneiform'), (77824, 78895, 'Egyptian Hieroglyphs'), (78896, 78911, 'Egyptian Hieroglyph Format Controls'), (82944, 83583,", "(43072, 43135, 'Phags-pa'), (43136, 43231, 'Saurashtra'), (43232, 43263, 'Devanagari Extended'), (43264, 43311, '<NAME>'),", "(5024, 5119, 'Cherokee'), (5120, 5759, 'Unified Canadian Aboriginal Syllabics'), (5760, 5791, 'Ogham'), (5792,", "'Adlam'), (126064, 126143, 'Indic Siyaq Numbers'), (126208, 126287, 'Ottoman Siyaq Numbers'), (126464, 126719,", "Alphabetic Symbols'), (126976, 127023, 'Mahjong Tiles'), (127024, 127135, 'Domino Tiles'), (127136, 127231, 'Playing", "assert isinstance(character, str) and len(character) == 1, repr(character) cp = ord(character) for start,", "'Mathematical Operators'), (8960, 9215, 'Miscellaneous Technical'), (9216, 9279, 'Control Pictures'), (9280, 9311, 'Optical", "'Anatolian Hieroglyphs'), (92160, 92735, 'Bamum Supplement'), (92736, 92783, 'Mro'), (92880, 92927, '<NAME>'), (92928,", "return name blocks = [(0, 127, 'Basic Latin'), (128, 255, 'Latin-1 Supplement'), (256,", "(1424, 1535, 'Hebrew'), (1536, 1791, 'Arabic'), (1792, 1871, 'Syriac'), (1872, 1919, 'Arabic Supplement'),", "127231, 'Playing Cards'), (127232, 127487, 'Enclosed Alphanumeric Supplement'), (127488, 127743, 'Enclosed Ideographic Supplement'),", "'<NAME>'), (72272, 72367, 'Soyombo'), (72384, 72447, '<NAME>'), (72704, 72815, 'Bhaiksuki'), (72816, 72895, 'Marchen'),", "'Cyrillic Supplement'), (1328, 1423, 'Armenian'), (1424, 1535, 'Hebrew'), (1536, 1791, 'Arabic'), (1792, 
1871,", "127, 'Basic Latin'), (128, 255, 'Latin-1 Supplement'), (256, 383, 'Latin Extended-A'), (384, 591,", "69967, 'Chakma'), (69968, 70015, 'Mahajani'), (70016, 70111, 'Sharada'), (70112, 70143, 'Sinhala Archaic Numbers'),", "(65536, 65663, 'Linear B Syllabary'), (65664, 65791, 'Linear B Ideograms'), (65792, 65855, 'Aegean", "'Thai'), (3712, 3839, 'Lao'), (3840, 4095, 'Tibetan'), (4096, 4255, 'Myanmar'), (4256, 4351, 'Georgian'),", "'Block Elements'), (9632, 9727, 'Geometric Shapes'), (9728, 9983, 'Miscellaneous Symbols'), (9984, 10175, 'Dingbats'),", "(3840, 4095, 'Tibetan'), (4096, 4255, 'Myanmar'), (4256, 4351, 'Georgian'), (4352, 4607, 'Hang<NAME>'), (4608,", "64335, 'Alphabetic Presentation Forms'), (64336, 65023, 'Arabic Presentation Forms-A'), (65024, 65039, 'Variation Selectors'),", "'Malayalam'), (3456, 3583, 'Sinhala'), (3584, 3711, 'Thai'), (3712, 3839, 'Lao'), (3840, 4095, 'Tibetan'),", "Extended'), (11744, 11775, 'Cyrillic Extended-A'), (11776, 11903, 'Supplemental Punctuation'), (11904, 12031, 'CJK Radicals", "'Arabic Presentation Forms-B'), (65280, 65519, 'Halfwidth and Fullwidth Forms'), (65520, 65535, 'Specials'), (65536,", "78895, 'Egyptian Hieroglyphs'), (78896, 78911, 'Egyptian Hieroglyph Format Controls'), (82944, 83583, 'Anatolian Hieroglyphs'),", "(42752, 42783, 'Modifier Tone Letters'), (42784, 43007, 'Latin Extended-D'), (43008, 43055, 'Syloti Nagri'),", "Musical Symbols'), (119040, 119295, 'Musical Symbols'), (119296, 119375, 'Ancient Greek Musical Notation'), (119520,", "6399, 'Unified Canadian Aboriginal Syllabics Extended'), (6400, 6479, 'Limbu'), (6480, 6527, 'Tai Le'),", "43647, 'Myanmar Extended-A'), (43648, 43743, 'Tai Viet'), (43744, 43775, 'Meetei Mayek Extensions'), (43776,", "687, 'IPA Extensions'), (688, 767, 'Spacing Modifier Letters'), (768, 879, 'Combining Diacritical Marks'),", "(55296, 56191, 'High Surrogates'), (56192, 56319, 'High Private Use Surrogates'), (56320, 57343, 'Low", "(43968, 44031, '<NAME>'), (44032, 
55215, 'Hangul Syllables'), (55216, 55295, 'Hangul Jamo Extended-B'), (55296,", "'CJK Compatibility'), (13312, 19903, 'CJK Unified Ideographs Extension A'), (19904, 19967, 'Yijing Hexagram", "'Nushu'), (113664, 113823, 'Duployan'), (113824, 113839, 'Shorthand Format Controls'), (118784, 119039, 'Byzantine Musical", "(44032, 55215, 'Hangul Syllables'), (55216, 55295, 'Hangul Jamo Extended-B'), (55296, 56191, 'High Surrogates'),", "character, or None if character has no block. from https://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python :param character\"\"\" assert", "(43824, 43887, 'Latin Extended-E'), (43888, 43967, 'Cherokee Supplement'), (43968, 44031, '<NAME>'), (44032, 55215,", "Subscripts'), (8352, 8399, 'Currency Symbols'), (8400, 8447, 'Combining Diacritical Marks for Symbols'), (8448,", "(127488, 127743, 'Enclosed Ideographic Supplement'), (127744, 128511, 'Miscellaneous Symbols and Pictographs'), (128512, 128591,", "42239, 'Lisu'), (42240, 42559, 'Vai'), (42560, 42655, 'Cyrillic Extended-B'), (42656, 42751, 'Bamum'), (42752,", "42559, 'Vai'), (42560, 42655, 'Cyrillic Extended-B'), (42656, 42751, 'Bamum'), (42752, 42783, 'Modifier Tone", "(65072, 65103, 'CJK Compatibility Forms'), (65104, 65135, 'Small Form Variants'), (65136, 65279, 'Arabic", "Symbols'), (129648, 129791, 'Symbols and Pictographs Extended-A'), (131072, 173791, 'CJK Unified Ideographs Extension", "'Oriya'), (2944, 3071, 'Tamil'), (3072, 3199, 'Telugu'), (3200, 3327, 'Kannada'), (3328, 3455, 'Malayalam'),", "(4992, 5023, 'Ethiopic Supplement'), (5024, 5119, 'Cherokee'), (5120, 5759, 'Unified Canadian Aboriginal Syllabics'),", "(113824, 113839, 'Shorthand Format Controls'), (118784, 119039, 'Byzantine Musical Symbols'), (119040, 119295, 'Musical", "Block'), (917504, 917631, 'Tags'), (917760, 917999, 'Variation Selectors Supplement'), (983040, 1048575, 'Supplementary Private", "'Thaana'), (1984, 2047, 'NKo'), (2048, 2111, 'Samaritan'), (2112, 2143, 'Mandaic'), (2144, 
2159, 'Syriac", "6319, 'Mongolian'), (6320, 6399, 'Unified Canadian Aboriginal Syllabics Extended'), (6400, 6479, 'Limbu'), (6480,", "Hieroglyphs'), (78896, 78911, 'Egyptian Hieroglyph Format Controls'), (82944, 83583, 'Anatolian Hieroglyphs'), (92160, 92735,", "(66432, 66463, 'Ugaritic'), (66464, 66527, 'Old Persian'), (66560, 66639, 'Deseret'), (66640, 66687, 'Shavian'),", "(68736, 68863, 'Old Hungarian'), (68864, 68927, '<NAME>'), (69216, 69247, 'Rumi Numeral Symbols'), (69376,", "'Ahom'), (71680, 71759, 'Dogra'), (71840, 71935, '<NAME>'), (72096, 72191, 'Nandinagari'), (72192, 72271, '<NAME>'),", "(71168, 71263, 'Modi'), (71264, 71295, 'Mongolian Supplement'), (71296, 71375, 'Takri'), (71424, 71487, 'Ahom'),", "74879, 'Cuneiform Numbers and Punctuation'), (74880, 75087, 'Early Dynastic Cuneiform'), (77824, 78895, 'Egyptian", "Punctuation'), (94208, 100351, 'Tangut'), (100352, 101119, 'Tangut Components'), (110592, 110847, 'Kana Supplement'), (110848,", "A'), (19904, 19967, 'Yijing Hexagram Symbols'), (19968, 40959, 'CJK Unified Ideographs'), (40960, 42127,", "Syllabics Extended'), (6400, 6479, 'Limbu'), (6480, 6527, 'Tai Le'), (6528, 6623, 'New Tai", "'Kaithi'), (69840, 69887, '<NAME>'), (69888, 69967, 'Chakma'), (69968, 70015, 'Mahajani'), (70016, 70111, 'Sharada'),", "(195104, 917503, 'No Unicode Block'), (917504, 917631, 'Tags'), (917760, 917999, 'Variation Selectors Supplement'),", "'CJK Compatibility Ideographs Supplement'), (195104, 917503, 'No Unicode Block'), (917504, 917631, 'Tags'), (917760,", "128767, 'Transport and Map Symbols'), (128768, 128895, 'Alchemical Symbols'), (128896, 129023, 'Geometric Shapes", "(128896, 129023, 'Geometric Shapes Extended'), (129024, 129279, 'Supplemental Arrows-C'), (129280, 129535, 'Supplemental Symbols", "Syllabary'), (67648, 67679, 'Imperial Aramaic'), (67680, 67711, 'Palmyrene'), (67712, 67759, 'Nabataean'), (67808, 67839,", "(71840, 71935, '<NAME>'), (72096, 72191, 'Nandinagari'), (72192, 72271, '<NAME>'), (72272, 
72367, 'Soyombo'), (72384,", "(127744, 128511, 'Miscellaneous Symbols and Pictographs'), (128512, 128591, 'Emoticons'), (128592, 128639, 'Ornamental Dingbats'),", "66351, 'Old Italic'), (66352, 66383, 'Gothic'), (66384, 66431, 'Old Permic'), (66432, 66463, 'Ugaritic'),", "173791, 'CJK Unified Ideographs Extension B'), (173824, 177983, 'CJK Unified Ideographs Extension C'),", "Letters and Months'), (13056, 13311, 'CJK Compatibility'), (13312, 19903, 'CJK Unified Ideographs Extension", "Map Symbols'), (128768, 128895, 'Alchemical Symbols'), (128896, 129023, 'Geometric Shapes Extended'), (129024, 129279,", "(1536, 1791, 'Arabic'), (1792, 1871, 'Syriac'), (1872, 1919, 'Arabic Supplement'), (1920, 1983, 'Thaana'),", "'Sundanese'), (7104, 7167, 'Batak'), (7168, 7247, 'Lepcha'), (7248, 7295, 'Ol Chiki'), (7296, 7311,", "<= end: return name blocks = [(0, 127, 'Basic Latin'), (128, 255, 'Latin-1", "12687, 'Hangul Compatibility Jamo'), (12688, 12703, 'Kanbun'), (12704, 12735, 'Bopomofo Extended'), (12736, 12783,", "(3072, 3199, 'Telugu'), (3200, 3327, 'Kannada'), (3328, 3455, 'Malayalam'), (3456, 3583, 'Sinhala'), (3584,", "'Latin Extended-E'), (43888, 43967, 'Cherokee Supplement'), (43968, 44031, '<NAME>'), (44032, 55215, 'Hangul Syllables'),", "'Miscellaneous Mathematical Symbols-A'), (10224, 10239, 'Supplemental Arrows-A'), (10240, 10495, 'Braille Patterns'), (10496, 10623,", "12783, 'CJK Strokes'), (12784, 12799, 'Katakana Phonetic Extensions'), (12800, 13055, 'Enclosed CJK Letters", "2047, 'NKo'), (2048, 2111, 'Samaritan'), (2112, 2143, 'Mandaic'), (2144, 2159, 'Syriac Supplement'), (2208,", "94207, 'Ideographic Symbols and Punctuation'), (94208, 100351, 'Tangut'), (100352, 101119, 'Tangut Components'), (110592,", "Fullwidth Forms'), (65520, 65535, 'Specials'), (65536, 65663, 'Linear B Syllabary'), (65664, 65791, 'Linear", "(10176, 10223, 'Miscellaneous Mathematical Symbols-A'), (10224, 10239, 'Supplemental Arrows-A'), (10240, 10495, 'Braille Patterns'),", "(9472, 9599, 
'Box Drawing'), (9600, 9631, 'Block Elements'), (9632, 9727, 'Geometric Shapes'), (9728,", "Diacritical Marks Extended'), (6912, 7039, 'Balinese'), (7040, 7103, 'Sundanese'), (7104, 7167, 'Batak'), (7168,", "'Halfwidth and Fullwidth Forms'), (65520, 65535, 'Specials'), (65536, 65663, 'Linear B Syllabary'), (65664,", "'Wancho'), (124928, 125151, '<NAME>'), (125184, 125279, 'Adlam'), (126064, 126143, 'Indic Siyaq Numbers'), (126208,", "(129280, 129535, 'Supplemental Symbols and Pictographs'), (129536, 129647, 'Chess Symbols'), (129648, 129791, 'Symbols", "(68000, 68095, 'Meroitic Cursive'), (68096, 68191, 'Kharoshthi'), (68192, 68223, 'Old South Arabian'), (68224,", "2687, 'Gurmukhi'), (2688, 2815, 'Gujarati'), (2816, 2943, 'Oriya'), (2944, 3071, 'Tamil'), (3072, 3199,", "66047, 'Phaistos Disc'), (66176, 66207, 'Lycian'), (66208, 66271, 'Carian'), (66272, 66303, 'Coptic Epact", "'Alchemical Symbols'), (128896, 129023, 'Geometric Shapes Extended'), (129024, 129279, 'Supplemental Arrows-C'), (129280, 129535,", "(7936, 8191, 'Greek Extended'), (8192, 8303, 'General Punctuation'), (8304, 8351, 'Superscripts and Subscripts'),", "'Phonetic Extensions Supplement'), (7616, 7679, 'Combining Diacritical Marks Supplement'), (7680, 7935, 'Latin Extended", "(3200, 3327, 'Kannada'), (3328, 3455, 'Malayalam'), (3456, 3583, 'Sinhala'), (3584, 3711, 'Thai'), (3712,", "1279, 'Cyrillic'), (1280, 1327, 'Cyrillic Supplement'), (1328, 1423, 'Armenian'), (1424, 1535, 'Hebrew'), (1536,", "'Tamil Supplement'), (73728, 74751, 'Cuneiform'), (74752, 74879, 'Cuneiform Numbers and Punctuation'), (74880, 75087,", "Mathematical Alphabetic Symbols'), (126976, 127023, 'Mahjong Tiles'), (127024, 127135, 'Domino Tiles'), (127136, 127231,", "128511, 'Miscellaneous Symbols and Pictographs'), (128512, 128591, 'Emoticons'), (128592, 128639, 'Ornamental Dingbats'), (128640,", "3583, 'Sinhala'), (3584, 3711, 'Thai'), (3712, 3839, 'Lao'), (3840, 4095, 'Tibetan'), (4096, 4255,", "(177984, 178207, 'CJK Unified 
Ideographs Extension D'), (178208, 183983, 'CJK Unified Ideographs Extension", "127023, 'Mahjong Tiles'), (127024, 127135, 'Domino Tiles'), (127136, 127231, 'Playing Cards'), (127232, 127487,", "(119296, 119375, 'Ancient Greek Musical Notation'), (119520, 119551, 'Mayan Numerals'), (119552, 119647, 'Tai", "Supplement'), (2208, 2303, 'Arabic Extended-A'), (2304, 2431, 'Devanagari'), (2432, 2559, 'Bengali'), (2560, 2687,", "Supplement'), (73728, 74751, 'Cuneiform'), (74752, 74879, 'Cuneiform Numbers and Punctuation'), (74880, 75087, 'Early", "(9280, 9311, 'Optical Character Recognition'), (9312, 9471, 'Enclosed Alphanumerics'), (9472, 9599, 'Box Drawing'),", "Extension A'), (19904, 19967, 'Yijing Hexagram Symbols'), (19968, 40959, 'CJK Unified Ideographs'), (40960,", "Numbers'), (65936, 65999, 'Ancient Symbols'), (66000, 66047, 'Phaistos Disc'), (66176, 66207, 'Lycian'), (66208,", "Sogdian'), (69424, 69487, 'Sogdian'), (69600, 69631, 'Elymaic'), (69632, 69759, 'Brahmi'), (69760, 69839, 'Kaithi'),", "(43232, 43263, 'Devanagari Extended'), (43264, 43311, '<NAME>'), (43312, 43359, 'Rejang'), (43360, 43391, 'Hangul", "5983, 'Buhid'), (5984, 6015, 'Tagbanwa'), (6016, 6143, 'Khmer'), (6144, 6319, 'Mongolian'), (6320, 6399,", "'Lepcha'), (7248, 7295, 'Ol Chiki'), (7296, 7311, 'Cyrillic Extended-C'), (7312, 7359, 'Georgian Extended'),", "'CJK Unified Ideographs Extension F'), (194560, 195103, 'CJK Compatibility Ideographs Supplement'), (195104, 917503,", "'Vedic Extensions'), (7424, 7551, 'Phonetic Extensions'), (7552, 7615, 'Phonetic Extensions Supplement'), (7616, 7679,", "'Nabataean'), (67808, 67839, 'Hatran'), (67840, 67871, 'Phoenician'), (67872, 67903, 'Lydian'), (67968, 67999, 'Meroitic", "'No Unicode Block'), (917504, 917631, 'Tags'), (917760, 917999, 'Variation Selectors Supplement'), (983040, 1048575,", "63743, 'Private Use Area'), (63744, 64255, 'CJK Compatibility Ideographs'), (64256, 64335, 'Alphabetic Presentation", "125151, '<NAME>'), (125184, 125279, 
'Adlam'), (126064, 126143, 'Indic Siyaq Numbers'), (126208, 126287, 'Ottoman", "(56192, 56319, 'High Private Use Surrogates'), (56320, 57343, 'Low Surrogates'), (57344, 63743, 'Private", "(7168, 7247, 'Lepcha'), (7248, 7295, 'Ol Chiki'), (7296, 7311, 'Cyrillic Extended-C'), (7312, 7359,", "68863, 'Old Hungarian'), (68864, 68927, '<NAME>'), (69216, 69247, 'Rumi Numeral Symbols'), (69376, 69423,", "'Duployan'), (113824, 113839, 'Shorthand Format Controls'), (118784, 119039, 'Byzantine Musical Symbols'), (119040, 119295,", "(2112, 2143, 'Mandaic'), (2144, 2159, 'Syriac Supplement'), (2208, 2303, 'Arabic Extended-A'), (2304, 2431,", "'Hang<NAME>'), (4608, 4991, 'Ethiopic'), (4992, 5023, 'Ethiopic Supplement'), (5024, 5119, 'Cherokee'), (5120, 5759,", "Strokes'), (12784, 12799, 'Katakana Phonetic Extensions'), (12800, 13055, 'Enclosed CJK Letters and Months'),", "B Syllabary'), (65664, 65791, 'Linear B Ideograms'), (65792, 65855, 'Aegean Numbers'), (65856, 65935,", "(70016, 70111, 'Sharada'), (70112, 70143, 'Sinhala Archaic Numbers'), (70144, 70223, 'Khojki'), (70272, 70319,", "(8528, 8591, 'Number Forms'), (8592, 8703, 'Arrows'), (8704, 8959, 'Mathematical Operators'), (8960, 9215,", "(6320, 6399, 'Unified Canadian Aboriginal Syllabics Extended'), (6400, 6479, 'Limbu'), (6480, 6527, 'Tai", "(7296, 7311, 'Cyrillic Extended-C'), (7312, 7359, 'Georgian Extended'), (7360, 7375, 'Sundanese Supplement'), (7376,", "'Mongolian'), (6320, 6399, 'Unified Canadian Aboriginal Syllabics Extended'), (6400, 6479, 'Limbu'), (6480, 6527,", "'Tai Le'), (6528, 6623, 'New Tai Lue'), (6624, 6655, 'Khmer Symbols'), (6656, 6687,", "Patterns'), (10496, 10623, 'Supplemental Arrows-B'), (10624, 10751, 'Miscellaneous Mathematical Symbols-B'), (10752, 11007, 'Supplemental", "(6624, 6655, 'Khmer Symbols'), (6656, 6687, 'Buginese'), (6688, 6831, 'Tai Tham'), (6832, 6911,", "Supplement'), (7616, 7679, 'Combining Diacritical Marks Supplement'), (7680, 7935, 'Latin Extended Additional'), (7936,", 
"56319, 'High Private Use Surrogates'), (56320, 57343, 'Low Surrogates'), (57344, 63743, 'Private Use", "66303, 'Coptic Epact Numbers'), (66304, 66351, 'Old Italic'), (66352, 66383, 'Gothic'), (66384, 66431,", "(119552, 119647, 'Tai Xuan Jing Symbols'), (119648, 119679, 'Counting Rod Numerals'), (119808, 120831,", "'Greek Extended'), (8192, 8303, 'General Punctuation'), (8304, 8351, 'Superscripts and Subscripts'), (8352, 8399,", "(68448, 68479, 'Inscriptional Pahlavi'), (68480, 68527, '<NAME>'), (68608, 68687, 'Old Turkic'), (68736, 68863,", "(119648, 119679, 'Counting Rod Numerals'), (119808, 120831, 'Mathematical Alphanumeric Symbols'), (120832, 121519, 'Sutton", "'Georgian Extended'), (7360, 7375, 'Sundanese Supplement'), (7376, 7423, 'Vedic Extensions'), (7424, 7551, 'Phonetic", "(77824, 78895, 'Egyptian Hieroglyphs'), (78896, 78911, 'Egyptian Hieroglyph Format Controls'), (82944, 83583, 'Anatolian", "Presentation Forms'), (64336, 65023, 'Arabic Presentation Forms-A'), (65024, 65039, 'Variation Selectors'), (65040, 65055,", "Half Marks'), (65072, 65103, 'CJK Compatibility Forms'), (65104, 65135, 'Small Form Variants'), (65136,", "'Kana Extended-A'), (110896, 110959, 'Small Kana Extension'), (110960, 111359, 'Nushu'), (113664, 113823, 'Duployan'),", "(110896, 110959, 'Small Kana Extension'), (110960, 111359, 'Nushu'), (113664, 113823, 'Duployan'), (113824, 113839,", "'Samaritan'), (2112, 2143, 'Mandaic'), (2144, 2159, 'Syriac Supplement'), (2208, 2303, 'Arabic Extended-A'), (2304,", "10175, 'Dingbats'), (10176, 10223, 'Miscellaneous Mathematical Symbols-A'), (10224, 10239, 'Supplemental Arrows-A'), (10240, 10495,", "(68096, 68191, 'Kharoshthi'), (68192, 68223, 'Old South Arabian'), (68224, 68255, 'Old North Arabian'),", "Area'), (63744, 64255, 'CJK Compatibility Ideographs'), (64256, 64335, 'Alphabetic Presentation Forms'), (64336, 65023,", "67647, 'Cypriot Syllabary'), (67648, 67679, 'Imperial Aramaic'), (67680, 67711, 'Palmyrene'), (67712, 67759, 
'Nabataean'),", "Unified Ideographs Extension B'), (173824, 177983, 'CJK Unified Ideographs Extension C'), (177984, 178207,", "120831, 'Mathematical Alphanumeric Symbols'), (120832, 121519, 'Sutton SignWriting'), (122880, 122927, 'Glagolitic Supplement'), (123136,", "'Low Surrogates'), (57344, 63743, 'Private Use Area'), (63744, 64255, 'CJK Compatibility Ideographs'), (64256,", "1535, 'Hebrew'), (1536, 1791, 'Arabic'), (1792, 1871, 'Syriac'), (1872, 1919, 'Arabic Supplement'), (1920,", "7423, 'Vedic Extensions'), (7424, 7551, 'Phonetic Extensions'), (7552, 7615, 'Phonetic Extensions Supplement'), (7616,", "North Arabian'), (68288, 68351, 'Manichaean'), (68352, 68415, 'Avestan'), (68416, 68447, 'Inscriptional Parthian'), (68448,", "67839, 'Hatran'), (67840, 67871, 'Phoenician'), (67872, 67903, 'Lydian'), (67968, 67999, 'Meroitic Hieroglyphs'), (68000,", "(7424, 7551, 'Phonetic Extensions'), (7552, 7615, 'Phonetic Extensions Supplement'), (7616, 7679, 'Combining Diacritical", "(43616, 43647, 'Myanmar Extended-A'), (43648, 43743, 'Tai Viet'), (43744, 43775, 'Meetei Mayek Extensions'),", "Symbols'), (66000, 66047, 'Phaistos Disc'), (66176, 66207, 'Lycian'), (66208, 66271, 'Carian'), (66272, 66303,", "92735, 'Bamum Supplement'), (92736, 92783, 'Mro'), (92880, 92927, '<NAME>'), (92928, 93071, '<NAME>'), (93760,", "Persian'), (66560, 66639, 'Deseret'), (66640, 66687, 'Shavian'), (66688, 66735, 'Osmanya'), (66736, 66815, 'Osage'),", "(128592, 128639, 'Ornamental Dingbats'), (128640, 128767, 'Transport and Map Symbols'), (128768, 128895, 'Alchemical", "'Soyombo'), (72384, 72447, '<NAME>'), (72704, 72815, 'Bhaiksuki'), (72816, 72895, 'Marchen'), (72960, 73055, '<NAME>'),", "8591, 'Number Forms'), (8592, 8703, 'Arrows'), (8704, 8959, 'Mathematical Operators'), (8960, 9215, 'Miscellaneous", "Supplement'), (92736, 92783, 'Mro'), (92880, 92927, '<NAME>'), (92928, 93071, '<NAME>'), (93760, 93855, 'Medefaidrin'),", "177983, 'CJK Unified Ideographs Extension C'), (177984, 178207, 
'CJK Unified Ideographs Extension D'),", "3199, 'Telugu'), (3200, 3327, 'Kannada'), (3328, 3455, 'Malayalam'), (3456, 3583, 'Sinhala'), (3584, 3711,", "'Yi Radicals'), (42192, 42239, 'Lisu'), (42240, 42559, 'Vai'), (42560, 42655, 'Cyrillic Extended-B'), (42656,", "Punctuation'), (74880, 75087, 'Early Dynastic Cuneiform'), (77824, 78895, 'Egyptian Hieroglyphs'), (78896, 78911, 'Egyptian", "(8592, 8703, 'Arrows'), (8704, 8959, 'Mathematical Operators'), (8960, 9215, 'Miscellaneous Technical'), (9216, 9279,", "Jing Symbols'), (119648, 119679, 'Counting Rod Numerals'), (119808, 120831, 'Mathematical Alphanumeric Symbols'), (120832,", "Forms-A'), (65024, 65039, 'Variation Selectors'), (65040, 65055, 'Vertical Forms'), (65056, 65071, 'Combining Half", "B Ideograms'), (65792, 65855, 'Aegean Numbers'), (65856, 65935, 'Ancient Greek Numbers'), (65936, 65999,", "'Bopomofo'), (12592, 12687, 'Hangul Compatibility Jamo'), (12688, 12703, 'Kanbun'), (12704, 12735, 'Bopomofo Extended'),", "119647, 'Tai Xuan Jing Symbols'), (119648, 119679, 'Counting Rod Numerals'), (119808, 120831, 'Mathematical", "(4256, 4351, 'Georgian'), (4352, 4607, 'Hang<NAME>'), (4608, 4991, 'Ethiopic'), (4992, 5023, 'Ethiopic Supplement'),", "Marks for Symbols'), (8448, 8527, 'Letterlike Symbols'), (8528, 8591, 'Number Forms'), (8592, 8703,", "'Combining Half Marks'), (65072, 65103, 'CJK Compatibility Forms'), (65104, 65135, 'Small Form Variants'),", "'Braille Patterns'), (10496, 10623, 'Supplemental Arrows-B'), (10624, 10751, 'Miscellaneous Mathematical Symbols-B'), (10752, 11007,", "127487, 'Enclosed Alphanumeric Supplement'), (127488, 127743, 'Enclosed Ideographic Supplement'), (127744, 128511, 'Miscellaneous Symbols", "'Supplemental Arrows-B'), (10624, 10751, 'Miscellaneous Mathematical Symbols-B'), (10752, 11007, 'Supplemental Mathematical Operators'), (11008,", "'Ideographic Description Characters'), (12288, 12351, 'CJK Symbols and Punctuation'), (12352, 12447, 'Hiragana'), (12448,", "128639, 
'Ornamental Dingbats'), (128640, 128767, 'Transport and Map Symbols'), (128768, 128895, 'Alchemical Symbols'),", "Form Variants'), (65136, 65279, 'Arabic Presentation Forms-B'), (65280, 65519, 'Halfwidth and Fullwidth Forms'),", "'Kannada'), (3328, 3455, 'Malayalam'), (3456, 3583, 'Sinhala'), (3584, 3711, 'Thai'), (3712, 3839, 'Lao'),", "'Old Turkic'), (68736, 68863, 'Old Hungarian'), (68864, 68927, '<NAME>'), (69216, 69247, 'Rumi Numeral", "Extended-A'), (131072, 173791, 'CJK Unified Ideographs Extension B'), (173824, 177983, 'CJK Unified Ideographs", "'Rumi Numeral Symbols'), (69376, 69423, 'Old Sogdian'), (69424, 69487, 'Sogdian'), (69600, 69631, 'Elymaic'),", "Extensions'), (7552, 7615, 'Phonetic Extensions Supplement'), (7616, 7679, 'Combining Diacritical Marks Supplement'), (7680,", "4991, 'Ethiopic'), (4992, 5023, 'Ethiopic Supplement'), (5024, 5119, 'Cherokee'), (5120, 5759, 'Unified Canadian", "Chiki'), (7296, 7311, 'Cyrillic Extended-C'), (7312, 7359, 'Georgian Extended'), (7360, 7375, 'Sundanese Supplement'),", "Compatibility Ideographs Supplement'), (195104, 917503, 'No Unicode Block'), (917504, 917631, 'Tags'), (917760, 917999,", "https://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python :param character\"\"\" assert isinstance(character, str) and len(character) == 1, repr(character) cp =", "'Arabic'), (1792, 1871, 'Syriac'), (1872, 1919, 'Arabic Supplement'), (1920, 1983, 'Thaana'), (1984, 2047,", "6527, 'Tai Le'), (6528, 6623, 'New Tai Lue'), (6624, 6655, 'Khmer Symbols'), (6656,", "110847, 'Kana Supplement'), (110848, 110895, 'Kana Extended-A'), (110896, 110959, 'Small Kana Extension'), (110960,", "'Phaistos Disc'), (66176, 66207, 'Lycian'), (66208, 66271, 'Carian'), (66272, 66303, 'Coptic Epact Numbers'),", "66431, 'Old Permic'), (66432, 66463, 'Ugaritic'), (66464, 66527, 'Old Persian'), (66560, 66639, 'Deseret'),", "(110960, 111359, 'Nushu'), (113664, 113823, 'Duployan'), (113824, 113839, 'Shorthand Format Controls'), 
(118784, 119039,", "Hexagram Symbols'), (19968, 40959, 'CJK Unified Ideographs'), (40960, 42127, 'Yi Syllables'), (42128, 42191,", "in blocks: if start <= cp <= end: return name blocks = [(0,", "(66384, 66431, 'Old Permic'), (66432, 66463, 'Ugaritic'), (66464, 66527, 'Old Persian'), (66560, 66639,", "Supplement'), (127744, 128511, 'Miscellaneous Symbols and Pictographs'), (128512, 128591, 'Emoticons'), (128592, 128639, 'Ornamental", "70111, 'Sharada'), (70112, 70143, 'Sinhala Archaic Numbers'), (70144, 70223, 'Khojki'), (70272, 70319, 'Multani'),", "2143, 'Mandaic'), (2144, 2159, 'Syriac Supplement'), (2208, 2303, 'Arabic Extended-A'), (2304, 2431, 'Devanagari'),", "'High Surrogates'), (56192, 56319, 'High Private Use Surrogates'), (56320, 57343, 'Low Surrogates'), (57344,", "def block(character): \"\"\" Return the Unicode block name for character, or None if", "'Old Sogdian'), (69424, 69487, 'Sogdian'), (69600, 69631, 'Elymaic'), (69632, 69759, 'Brahmi'), (69760, 69839,", "(82944, 83583, 'Anatolian Hieroglyphs'), (92160, 92735, 'Bamum Supplement'), (92736, 92783, 'Mro'), (92880, 92927,", "Supplement'), (1920, 1983, 'Thaana'), (1984, 2047, 'NKo'), (2048, 2111, 'Samaritan'), (2112, 2143, 'Mandaic'),", "B'), (173824, 177983, 'CJK Unified Ideographs Extension C'), (177984, 178207, 'CJK Unified Ideographs", "'Bamum'), (42752, 42783, 'Modifier Tone Letters'), (42784, 43007, 'Latin Extended-D'), (43008, 43055, 'Syloti", "Le'), (6528, 6623, 'New Tai Lue'), (6624, 6655, 'Khmer Symbols'), (6656, 6687, 'Buginese'),", "'Tai Viet'), (43744, 43775, 'Meetei Mayek Extensions'), (43776, 43823, 'Ethiopic Extended-A'), (43824, 43887,", "'Kana Supplement'), (110848, 110895, 'Kana Extended-A'), (110896, 110959, 'Small Kana Extension'), (110960, 111359,", "71295, 'Mongolian Supplement'), (71296, 71375, 'Takri'), (71424, 71487, 'Ahom'), (71680, 71759, 'Dogra'), (71840,", "(9600, 9631, 'Block Elements'), (9632, 9727, 'Geometric Shapes'), (9728, 9983, 'Miscellaneous Symbols'), (9984,", 
"Controls'), (82944, 83583, 'Anatolian Hieroglyphs'), (92160, 92735, 'Bamum Supplement'), (92736, 92783, 'Mro'), (92880,", "Unified Ideographs'), (40960, 42127, 'Yi Syllables'), (42128, 42191, 'Yi Radicals'), (42192, 42239, 'Lisu'),", "71759, 'Dogra'), (71840, 71935, '<NAME>'), (72096, 72191, 'Nandinagari'), (72192, 72271, '<NAME>'), (72272, 72367,", "Pictographs'), (128512, 128591, 'Emoticons'), (128592, 128639, 'Ornamental Dingbats'), (128640, 128767, 'Transport and Map", "'Cherokee Supplement'), (43968, 44031, '<NAME>'), (44032, 55215, 'Hangul Syllables'), (55216, 55295, 'Hangul Jamo", "65103, 'CJK Compatibility Forms'), (65104, 65135, 'Small Form Variants'), (65136, 65279, 'Arabic Presentation", "'Specials'), (65536, 65663, 'Linear B Syllabary'), (65664, 65791, 'Linear B Ideograms'), (65792, 65855,", "5791, 'Ogham'), (5792, 5887, 'Runic'), (5888, 5919, 'Tagalog'), (5920, 5951, 'Hanunoo'), (5952, 5983,", "(118784, 119039, 'Byzantine Musical Symbols'), (119040, 119295, 'Musical Symbols'), (119296, 119375, 'Ancient Greek", "128895, 'Alchemical Symbols'), (128896, 129023, 'Geometric Shapes Extended'), (129024, 129279, 'Supplemental Arrows-C'), (129280,", "67999, 'Meroitic Hieroglyphs'), (68000, 68095, 'Meroitic Cursive'), (68096, 68191, 'Kharoshthi'), (68192, 68223, 'Old", "7295, 'Ol Chiki'), (7296, 7311, 'Cyrillic Extended-C'), (7312, 7359, 'Georgian Extended'), (7360, 7375,", "(5952, 5983, 'Buhid'), (5984, 6015, 'Tagbanwa'), (6016, 6143, 'Khmer'), (6144, 6319, 'Mongolian'), (6320,", "6143, 'Khmer'), (6144, 6319, 'Mongolian'), (6320, 6399, 'Unified Canadian Aboriginal Syllabics Extended'), (6400,", "65135, 'Small Form Variants'), (65136, 65279, 'Arabic Presentation Forms-B'), (65280, 65519, 'Halfwidth and", "(1328, 1423, 'Armenian'), (1424, 1535, 'Hebrew'), (1536, 1791, 'Arabic'), (1792, 1871, 'Syriac'), (1872,", "'Miscellaneous Symbols'), (9984, 10175, 'Dingbats'), (10176, 10223, 'Miscellaneous Mathematical Symbols-A'), (10224, 10239, 'Supplemental", "3071, 
'Tamil'), (3072, 3199, 'Telugu'), (3200, 3327, 'Kannada'), (3328, 3455, 'Malayalam'), (3456, 3583,", "'Arabic Presentation Forms-A'), (65024, 65039, 'Variation Selectors'), (65040, 65055, 'Vertical Forms'), (65056, 65071,", "(6688, 6831, 'Tai Tham'), (6832, 6911, 'Combining Diacritical Marks Extended'), (6912, 7039, 'Balinese'),", "'Mahjong Tiles'), (127024, 127135, 'Domino Tiles'), (127136, 127231, 'Playing Cards'), (127232, 127487, 'Enclosed", "Arabian'), (68288, 68351, 'Manichaean'), (68352, 68415, 'Avestan'), (68416, 68447, 'Inscriptional Parthian'), (68448, 68479,", "7247, 'Lepcha'), (7248, 7295, 'Ol Chiki'), (7296, 7311, 'Cyrillic Extended-C'), (7312, 7359, 'Georgian", "'Palmyrene'), (67712, 67759, 'Nabataean'), (67808, 67839, 'Hatran'), (67840, 67871, 'Phoenician'), (67872, 67903, 'Lydian'),", "(3456, 3583, 'Sinhala'), (3584, 3711, 'Thai'), (3712, 3839, 'Lao'), (3840, 4095, 'Tibetan'), (4096,", "'Meroitic Hieroglyphs'), (68000, 68095, 'Meroitic Cursive'), (68096, 68191, 'Kharoshthi'), (68192, 68223, 'Old South", "(13056, 13311, 'CJK Compatibility'), (13312, 19903, 'CJK Unified Ideographs Extension A'), (19904, 19967,", "Extended'), (129024, 129279, 'Supplemental Arrows-C'), (129280, 129535, 'Supplemental Symbols and Pictographs'), (129536, 129647,", "(66816, 66863, 'Elbasan'), (66864, 66927, 'Caucasian Albanian'), (67072, 67455, 'Linear A'), (67584, 67647,", "(68608, 68687, 'Old Turkic'), (68736, 68863, 'Old Hungarian'), (68864, 68927, '<NAME>'), (69216, 69247,", "68255, 'Old North Arabian'), (68288, 68351, 'Manichaean'), (68352, 68415, 'Avestan'), (68416, 68447, 'Inscriptional", "'Tai Tham'), (6832, 6911, 'Combining Diacritical Marks Extended'), (6912, 7039, 'Balinese'), (7040, 7103,", "Extensions'), (7424, 7551, 'Phonetic Extensions'), (7552, 7615, 'Phonetic Extensions Supplement'), (7616, 7679, 'Combining", "and Coptic'), (1024, 1279, 'Cyrillic'), (1280, 1327, 'Cyrillic Supplement'), (1328, 1423, 'Armenian'), (1424,", "(1872, 1919, 'Arabic Supplement'), 
(1920, 1983, 'Thaana'), (1984, 2047, 'NKo'), (2048, 2111, 'Samaritan'),", "'Lycian'), (66208, 66271, 'Carian'), (66272, 66303, 'Coptic Epact Numbers'), (66304, 66351, 'Old Italic'),", "68479, 'Inscriptional Pahlavi'), (68480, 68527, '<NAME>'), (68608, 68687, 'Old Turkic'), (68736, 68863, 'Old", "(74752, 74879, 'Cuneiform Numbers and Punctuation'), (74880, 75087, 'Early Dynastic Cuneiform'), (77824, 78895,", "'Old Permic'), (66432, 66463, 'Ugaritic'), (66464, 66527, 'Old Persian'), (66560, 66639, 'Deseret'), (66640,", "72895, 'Marchen'), (72960, 73055, '<NAME>'), (73056, 73135, '<NAME>'), (73440, 73471, 'Makasar'), (73664, 73727,", "(40960, 42127, 'Yi Syllables'), (42128, 42191, 'Yi Radicals'), (42192, 42239, 'Lisu'), (42240, 42559,", "Ideographs Extension E'), (183984, 191471, 'CJK Unified Ideographs Extension F'), (194560, 195103, 'CJK", "(110848, 110895, 'Kana Extended-A'), (110896, 110959, 'Small Kana Extension'), (110960, 111359, 'Nushu'), (113664,", "'Geometric Shapes Extended'), (129024, 129279, 'Supplemental Arrows-C'), (129280, 129535, 'Supplemental Symbols and Pictographs'),", "Radicals'), (12272, 12287, 'Ideographic Description Characters'), (12288, 12351, 'CJK Symbols and Punctuation'), (12352,", "'Shavian'), (66688, 66735, 'Osmanya'), (66736, 66815, 'Osage'), (66816, 66863, 'Elbasan'), (66864, 66927, 'Caucasian", "'CJK Compatibility Ideographs'), (64256, 64335, 'Alphabetic Presentation Forms'), (64336, 65023, 'Arabic Presentation Forms-A'),", "129023, 'Geometric Shapes Extended'), (129024, 129279, 'Supplemental Arrows-C'), (129280, 129535, 'Supplemental Symbols and", "Marks Supplement'), (7680, 7935, 'Latin Extended Additional'), (7936, 8191, 'Greek Extended'), (8192, 8303,", "Lue'), (6624, 6655, 'Khmer Symbols'), (6656, 6687, 'Buginese'), (6688, 6831, 'Tai Tham'), (6832,", "'Ideographic Symbols and Punctuation'), (94208, 100351, 'Tangut'), (100352, 101119, 'Tangut Components'), (110592, 110847,", "69887, '<NAME>'), (69888, 69967, 'Chakma'), (69968, 
70015, 'Mahajani'), (70016, 70111, 'Sharada'), (70112, 70143,", "'Supplemental Arrows-C'), (129280, 129535, 'Supplemental Symbols and Pictographs'), (129536, 129647, 'Chess Symbols'), (129648,", "Diacritical Marks for Symbols'), (8448, 8527, 'Letterlike Symbols'), (8528, 8591, 'Number Forms'), (8592,", "(6528, 6623, 'New Tai Lue'), (6624, 6655, 'Khmer Symbols'), (6656, 6687, 'Buginese'), (6688,", "(66352, 66383, 'Gothic'), (66384, 66431, 'Old Permic'), (66432, 66463, 'Ugaritic'), (66464, 66527, 'Old", "'Gujarati'), (2816, 2943, 'Oriya'), (2944, 3071, 'Tamil'), (3072, 3199, 'Telugu'), (3200, 3327, 'Kannada'),", "(5920, 5951, 'Hanunoo'), (5952, 5983, 'Buhid'), (5984, 6015, 'Tagbanwa'), (6016, 6143, 'Khmer'), (6144,", "Diacritical Marks Supplement'), (7680, 7935, 'Latin Extended Additional'), (7936, 8191, 'Greek Extended'), (8192,", "Extension D'), (178208, 183983, 'CJK Unified Ideographs Extension E'), (183984, 191471, 'CJK Unified", "(10752, 11007, 'Supplemental Mathematical Operators'), (11008, 11263, 'Miscellaneous Symbols and Arrows'), (11264, 11359,", "(8352, 8399, 'Currency Symbols'), (8400, 8447, 'Combining Diacritical Marks for Symbols'), (8448, 8527,", "'Dingbats'), (10176, 10223, 'Miscellaneous Mathematical Symbols-A'), (10224, 10239, 'Supplemental Arrows-A'), (10240, 10495, 'Braille", "(126976, 127023, 'Mahjong Tiles'), (127024, 127135, 'Domino Tiles'), (127136, 127231, 'Playing Cards'), (127232,", "(5120, 5759, 'Unified Canadian Aboriginal Syllabics'), (5760, 5791, 'Ogham'), (5792, 5887, 'Runic'), (5888,", "'Tagalog'), (5920, 5951, 'Hanunoo'), (5952, 5983, 'Buhid'), (5984, 6015, 'Tagbanwa'), (6016, 6143, 'Khmer'),", "(1280, 1327, 'Cyrillic Supplement'), (1328, 1423, 'Armenian'), (1424, 1535, 'Hebrew'), (1536, 1791, 'Arabic'),", "(68416, 68447, 'Inscriptional Parthian'), (68448, 68479, 'Inscriptional Pahlavi'), (68480, 68527, '<NAME>'), (68608, 68687,", "'Byzantine Musical Symbols'), (119040, 119295, 'Musical Symbols'), (119296, 119375, 'Ancient Greek 
Musical Notation'),", "'Sinhala Archaic Numbers'), (70144, 70223, 'Khojki'), (70272, 70319, 'Multani'), (70320, 70399, 'Khudawadi'), (70400,", "Compatibility Forms'), (65104, 65135, 'Small Form Variants'), (65136, 65279, 'Arabic Presentation Forms-B'), (65280,", "'Gurmukhi'), (2688, 2815, 'Gujarati'), (2816, 2943, 'Oriya'), (2944, 3071, 'Tamil'), (3072, 3199, 'Telugu'),", "(94176, 94207, 'Ideographic Symbols and Punctuation'), (94208, 100351, 'Tangut'), (100352, 101119, 'Tangut Components'),", "8351, 'Superscripts and Subscripts'), (8352, 8399, 'Currency Symbols'), (8400, 8447, 'Combining Diacritical Marks", "Arrows-B'), (10624, 10751, 'Miscellaneous Mathematical Symbols-B'), (10752, 11007, 'Supplemental Mathematical Operators'), (11008, 11263,", "'<NAME>'), (72704, 72815, 'Bhaiksuki'), (72816, 72895, 'Marchen'), (72960, 73055, '<NAME>'), (73056, 73135, '<NAME>'),", "66815, 'Osage'), (66816, 66863, 'Elbasan'), (66864, 66927, 'Caucasian Albanian'), (67072, 67455, 'Linear A'),", "65999, 'Ancient Symbols'), (66000, 66047, 'Phaistos Disc'), (66176, 66207, 'Lycian'), (66208, 66271, 'Carian'),", "'Ornamental Dingbats'), (128640, 128767, 'Transport and Map Symbols'), (128768, 128895, 'Alchemical Symbols'), (128896,", "121519, 'Sutton SignWriting'), (122880, 122927, 'Glagolitic Supplement'), (123136, 123215, '<NAME>'), (123584, 123647, 'Wancho'),", "block(character): \"\"\" Return the Unicode block name for character, or None if character", "'Supplemental Arrows-A'), (10240, 10495, 'Braille Patterns'), (10496, 10623, 'Supplemental Arrows-B'), (10624, 10751, 'Miscellaneous", "'New Tai Lue'), (6624, 6655, 'Khmer Symbols'), (6656, 6687, 'Buginese'), (6688, 6831, 'Tai", "(8960, 9215, 'Miscellaneous Technical'), (9216, 9279, 'Control Pictures'), (9280, 9311, 'Optical Character Recognition'),", "'Imperial Aramaic'), (67680, 67711, 'Palmyrene'), (67712, 67759, 'Nabataean'), (67808, 67839, 'Hatran'), (67840, 67871,", "'Inscriptional Pahlavi'), (68480, 68527, '<NAME>'), (68608, 
68687, 'Old Turkic'), (68736, 68863, 'Old Hungarian'),", "(4096, 4255, 'Myanmar'), (4256, 4351, 'Georgian'), (4352, 4607, 'Hang<NAME>'), (4608, 4991, 'Ethiopic'), (4992,", "Hieroglyphs'), (68000, 68095, 'Meroitic Cursive'), (68096, 68191, 'Kharoshthi'), (68192, 68223, 'Old South Arabian'),", "(12544, 12591, 'Bopomofo'), (12592, 12687, 'Hangul Compatibility Jamo'), (12688, 12703, 'Kanbun'), (12704, 12735,", "Indic Number Forms'), (43072, 43135, 'Phags-pa'), (43136, 43231, 'Saurashtra'), (43232, 43263, 'Devanagari Extended'),", "(69600, 69631, 'Elymaic'), (69632, 69759, 'Brahmi'), (69760, 69839, 'Kaithi'), (69840, 69887, '<NAME>'), (69888,", "Dingbats'), (128640, 128767, 'Transport and Map Symbols'), (128768, 128895, 'Alchemical Symbols'), (128896, 129023,", "'Tagbanwa'), (6016, 6143, 'Khmer'), (6144, 6319, 'Mongolian'), (6320, 6399, 'Unified Canadian Aboriginal Syllabics", "'CJK Unified Ideographs Extension D'), (178208, 183983, 'CJK Unified Ideographs Extension E'), (183984,", "(123584, 123647, 'Wancho'), (124928, 125151, '<NAME>'), (125184, 125279, 'Adlam'), (126064, 126143, 'Indic Siyaq", "Supplement'), (71296, 71375, 'Takri'), (71424, 71487, 'Ahom'), (71680, 71759, 'Dogra'), (71840, 71935, '<NAME>'),", "'Elymaic'), (69632, 69759, 'Brahmi'), (69760, 69839, 'Kaithi'), (69840, 69887, '<NAME>'), (69888, 69967, 'Chakma'),", "(66640, 66687, 'Shavian'), (66688, 66735, 'Osmanya'), (66736, 66815, 'Osage'), (66816, 66863, 'Elbasan'), (66864,", "(129536, 129647, 'Chess Symbols'), (129648, 129791, 'Symbols and Pictographs Extended-A'), (131072, 173791, 'CJK", "Punctuation'), (8304, 8351, 'Superscripts and Subscripts'), (8352, 8399, 'Currency Symbols'), (8400, 8447, 'Combining", "Elements'), (9632, 9727, 'Geometric Shapes'), (9728, 9983, 'Miscellaneous Symbols'), (9984, 10175, 'Dingbats'), (10176,", "'Hebrew'), (1536, 1791, 'Arabic'), (1792, 1871, 'Syriac'), (1872, 1919, 'Arabic Supplement'), (1920, 1983,", "'Common Indic Number Forms'), (43072, 43135, 'Phags-pa'), (43136, 
43231, 'Saurashtra'), (43232, 43263, 'Devanagari", "4607, 'Hang<NAME>'), (4608, 4991, 'Ethiopic'), (4992, 5023, 'Ethiopic Supplement'), (5024, 5119, 'Cherokee'), (5120,", "10623, 'Supplemental Arrows-B'), (10624, 10751, 'Miscellaneous Mathematical Symbols-B'), (10752, 11007, 'Supplemental Mathematical Operators'),", "(67680, 67711, 'Palmyrene'), (67712, 67759, 'Nabataean'), (67808, 67839, 'Hatran'), (67840, 67871, 'Phoenician'), (67872,", "129279, 'Supplemental Arrows-C'), (129280, 129535, 'Supplemental Symbols and Pictographs'), (129536, 129647, 'Chess Symbols'),", "'Private Use Area'), (63744, 64255, 'CJK Compatibility Ideographs'), (64256, 64335, 'Alphabetic Presentation Forms'),", "Punctuation'), (11904, 12031, 'CJK Radicals Supplement'), (12032, 12255, 'Kangxi Radicals'), (12272, 12287, 'Ideographic", "and Fullwidth Forms'), (65520, 65535, 'Specials'), (65536, 65663, 'Linear B Syllabary'), (65664, 65791,", "917503, 'No Unicode Block'), (917504, 917631, 'Tags'), (917760, 917999, 'Variation Selectors Supplement'), (983040,", "# from https://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python def block(character): \"\"\" Return the Unicode block name for character,", "(6832, 6911, 'Combining Diacritical Marks Extended'), (6912, 7039, 'Balinese'), (7040, 7103, 'Sundanese'), (7104,", "Mathematical Symbols-B'), (10752, 11007, 'Supplemental Mathematical Operators'), (11008, 11263, 'Miscellaneous Symbols and Arrows'),", "Greek Numbers'), (65936, 65999, 'Ancient Symbols'), (66000, 66047, 'Phaistos Disc'), (66176, 66207, 'Lycian'),", "Hungarian'), (68864, 68927, '<NAME>'), (69216, 69247, 'Rumi Numeral Symbols'), (69376, 69423, 'Old Sogdian'),", "(384, 591, 'Latin Extended-B'), (592, 687, 'IPA Extensions'), (688, 767, 'Spacing Modifier Letters'),", "'Hangul Syllables'), (55216, 55295, 'Hangul Jamo Extended-B'), (55296, 56191, 'High Surrogates'), (56192, 56319,", "'Musical Symbols'), (119296, 119375, 'Ancient Greek Musical Notation'), (119520, 
119551, 'Mayan Numerals'), (119552,", "'Devanagari'), (2432, 2559, 'Bengali'), (2560, 2687, 'Gurmukhi'), (2688, 2815, 'Gujarati'), (2816, 2943, 'Oriya'),", "(2048, 2111, 'Samaritan'), (2112, 2143, 'Mandaic'), (2144, 2159, 'Syriac Supplement'), (2208, 2303, 'Arabic", "(11648, 11743, 'Ethiopic Extended'), (11744, 11775, 'Cyrillic Extended-A'), (11776, 11903, 'Supplemental Punctuation'), (11904,", "(768, 879, 'Combining Diacritical Marks'), (880, 1023, 'Greek and Coptic'), (1024, 1279, 'Cyrillic'),", "(42192, 42239, 'Lisu'), (42240, 42559, 'Vai'), (42560, 42655, 'Cyrillic Extended-B'), (42656, 42751, 'Bamum'),", "127135, 'Domino Tiles'), (127136, 127231, 'Playing Cards'), (127232, 127487, 'Enclosed Alphanumeric Supplement'), (127488,", "9215, 'Miscellaneous Technical'), (9216, 9279, 'Control Pictures'), (9280, 9311, 'Optical Character Recognition'), (9312,", "3711, 'Thai'), (3712, 3839, 'Lao'), (3840, 4095, 'Tibetan'), (4096, 4255, 'Myanmar'), (4256, 4351,", "'Optical Character Recognition'), (9312, 9471, 'Enclosed Alphanumerics'), (9472, 9599, 'Box Drawing'), (9600, 9631,", "Format Controls'), (118784, 119039, 'Byzantine Musical Symbols'), (119040, 119295, 'Musical Symbols'), (119296, 119375,", "'Arabic Mathematical Alphabetic Symbols'), (126976, 127023, 'Mahjong Tiles'), (127024, 127135, 'Domino Tiles'), (127136,", "'Newa'), (70784, 70879, 'Tirhuta'), (71040, 71167, 'Siddham'), (71168, 71263, 'Modi'), (71264, 71295, 'Mongolian", "isinstance(character, str) and len(character) == 1, repr(character) cp = ord(character) for start, end,", "'Greek and Coptic'), (1024, 1279, 'Cyrillic'), (1280, 1327, 'Cyrillic Supplement'), (1328, 1423, 'Armenian'),", "(69424, 69487, 'Sogdian'), (69600, 69631, 'Elymaic'), (69632, 69759, 'Brahmi'), (69760, 69839, 'Kaithi'), (69840,", "(125184, 125279, 'Adlam'), (126064, 126143, 'Indic Siyaq Numbers'), (126208, 126287, 'Ottoman Siyaq Numbers'),", "'Small Form Variants'), (65136, 65279, 'Arabic Presentation Forms-B'), (65280, 65519, 
'Halfwidth and Fullwidth", "8703, 'Arrows'), (8704, 8959, 'Mathematical Operators'), (8960, 9215, 'Miscellaneous Technical'), (9216, 9279, 'Control", "(122880, 122927, 'Glagolitic Supplement'), (123136, 123215, '<NAME>'), (123584, 123647, 'Wancho'), (124928, 125151, '<NAME>'),", "'Miscellaneous Symbols and Pictographs'), (128512, 128591, 'Emoticons'), (128592, 128639, 'Ornamental Dingbats'), (128640, 128767,", "(120832, 121519, 'Sutton SignWriting'), (122880, 122927, 'Glagolitic Supplement'), (123136, 123215, '<NAME>'), (123584, 123647,", "'Ancient Greek Musical Notation'), (119520, 119551, 'Mayan Numerals'), (119552, 119647, 'Tai Xuan Jing", "119295, 'Musical Symbols'), (119296, 119375, 'Ancient Greek Musical Notation'), (119520, 119551, 'Mayan Numerals'),", "Supplement'), (7680, 7935, 'Latin Extended Additional'), (7936, 8191, 'Greek Extended'), (8192, 8303, 'General", "(8704, 8959, 'Mathematical Operators'), (8960, 9215, 'Miscellaneous Technical'), (9216, 9279, 'Control Pictures'), (9280,", "'Transport and Map Symbols'), (128768, 128895, 'Alchemical Symbols'), (128896, 129023, 'Geometric Shapes Extended'),", "'Phags-pa'), (43136, 43231, 'Saurashtra'), (43232, 43263, 'Devanagari Extended'), (43264, 43311, '<NAME>'), (43312, 43359,", "'Latin Extended-D'), (43008, 43055, 'Syloti Nagri'), (43056, 43071, 'Common Indic Number Forms'), (43072,", "Surrogates'), (57344, 63743, 'Private Use Area'), (63744, 64255, 'CJK Compatibility Ideographs'), (64256, 64335,", "Symbols'), (119296, 119375, 'Ancient Greek Musical Notation'), (119520, 119551, 'Mayan Numerals'), (119552, 119647,", "68927, '<NAME>'), (69216, 69247, 'Rumi Numeral Symbols'), (69376, 69423, 'Old Sogdian'), (69424, 69487,", "Latin'), (128, 255, 'Latin-1 Supplement'), (256, 383, 'Latin Extended-A'), (384, 591, 'Latin Extended-B'),", "cp = ord(character) for start, end, name in blocks: if start <= cp", "Characters'), (12288, 12351, 'CJK Symbols and Punctuation'), (12352, 12447, 'Hiragana'), (12448, 12543, 
'Katakana'),", "Additional'), (7936, 8191, 'Greek Extended'), (8192, 8303, 'General Punctuation'), (8304, 8351, 'Superscripts and", "72271, '<NAME>'), (72272, 72367, 'Soyombo'), (72384, 72447, '<NAME>'), (72704, 72815, 'Bhaiksuki'), (72816, 72895,", "block name for character, or None if character has no block. from https://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python", "Private Use Surrogates'), (56320, 57343, 'Low Surrogates'), (57344, 63743, 'Private Use Area'), (63744,", "119039, 'Byzantine Musical Symbols'), (119040, 119295, 'Musical Symbols'), (119296, 119375, 'Ancient Greek Musical", "11903, 'Supplemental Punctuation'), (11904, 12031, 'CJK Radicals Supplement'), (12032, 12255, 'Kangxi Radicals'), (12272,", "'Avestan'), (68416, 68447, 'Inscriptional Parthian'), (68448, 68479, 'Inscriptional Pahlavi'), (68480, 68527, '<NAME>'), (68608,", "(65936, 65999, 'Ancient Symbols'), (66000, 66047, 'Phaistos Disc'), (66176, 66207, 'Lycian'), (66208, 66271,", "'Kangxi Radicals'), (12272, 12287, 'Ideographic Description Characters'), (12288, 12351, 'CJK Symbols and Punctuation'),", "Surrogates'), (56320, 57343, 'Low Surrogates'), (57344, 63743, 'Private Use Area'), (63744, 64255, 'CJK", "101119, 'Tangut Components'), (110592, 110847, 'Kana Supplement'), (110848, 110895, 'Kana Extended-A'), (110896, 110959,", "12031, 'CJK Radicals Supplement'), (12032, 12255, 'Kangxi Radicals'), (12272, 12287, 'Ideographic Description Characters'),", "'High Private Use Surrogates'), (56320, 57343, 'Low Surrogates'), (57344, 63743, 'Private Use Area'),", "42655, 'Cyrillic Extended-B'), (42656, 42751, 'Bamum'), (42752, 42783, 'Modifier Tone Letters'), (42784, 43007,", "Tiles'), (127024, 127135, 'Domino Tiles'), (127136, 127231, 'Playing Cards'), (127232, 127487, 'Enclosed Alphanumeric", "68447, 'Inscriptional Parthian'), (68448, 68479, 'Inscriptional Pahlavi'), (68480, 68527, '<NAME>'), (68608, 68687, 'Old", "'Osage'), (66816, 66863, 'Elbasan'), (66864, 66927, 
'Caucasian Albanian'), (67072, 67455, 'Linear A'), (67584,", "Unicode block name for character, or None if character has no block. from", "10239, 'Supplemental Arrows-A'), (10240, 10495, 'Braille Patterns'), (10496, 10623, 'Supplemental Arrows-B'), (10624, 10751,", "'CJK Unified Ideographs Extension B'), (173824, 177983, 'CJK Unified Ideographs Extension C'), (177984,", "(42656, 42751, 'Bamum'), (42752, 42783, 'Modifier Tone Letters'), (42784, 43007, 'Latin Extended-D'), (43008,", "71167, 'Siddham'), (71168, 71263, 'Modi'), (71264, 71295, 'Mongolian Supplement'), (71296, 71375, 'Takri'), (71424,", "str) and len(character) == 1, repr(character) cp = ord(character) for start, end, name", "68415, 'Avestan'), (68416, 68447, 'Inscriptional Parthian'), (68448, 68479, 'Inscriptional Pahlavi'), (68480, 68527, '<NAME>'),", "(5984, 6015, 'Tagbanwa'), (6016, 6143, 'Khmer'), (6144, 6319, 'Mongolian'), (6320, 6399, 'Unified Canadian", "Ideographs Extension D'), (178208, 183983, 'CJK Unified Ideographs Extension E'), (183984, 191471, 'CJK", "(72272, 72367, 'Soyombo'), (72384, 72447, '<NAME>'), (72704, 72815, 'Bhaiksuki'), (72816, 72895, 'Marchen'), (72960,", "69839, 'Kaithi'), (69840, 69887, '<NAME>'), (69888, 69967, 'Chakma'), (69968, 70015, 'Mahajani'), (70016, 70111,", "'Arabic Supplement'), (1920, 1983, 'Thaana'), (1984, 2047, 'NKo'), (2048, 2111, 'Samaritan'), (2112, 2143,", "(73440, 73471, 'Makasar'), (73664, 73727, 'Tamil Supplement'), (73728, 74751, 'Cuneiform'), (74752, 74879, 'Cuneiform", "'Javanese'), (43488, 43519, 'Myanmar Extended-B'), (43520, 43615, 'Cham'), (43616, 43647, 'Myanmar Extended-A'), (43648,", "100351, 'Tangut'), (100352, 101119, 'Tangut Components'), (110592, 110847, 'Kana Supplement'), (110848, 110895, 'Kana", "55295, 'Hangul Jamo Extended-B'), (55296, 56191, 'High Surrogates'), (56192, 56319, 'High Private Use", "Forms'), (64336, 65023, 'Arabic Presentation Forms-A'), (65024, 65039, 'Variation Selectors'), (65040, 65055, 'Vertical", "(69760, 69839, 
'Kaithi'), (69840, 69887, '<NAME>'), (69888, 69967, 'Chakma'), (69968, 70015, 'Mahajani'), (70016,", "43263, 'Devanagari Extended'), (43264, 43311, '<NAME>'), (43312, 43359, 'Rejang'), (43360, 43391, 'Hangul Jamo", "7039, 'Balinese'), (7040, 7103, 'Sundanese'), (7104, 7167, 'Batak'), (7168, 7247, 'Lepcha'), (7248, 7295,", "'Number Forms'), (8592, 8703, 'Arrows'), (8704, 8959, 'Mathematical Operators'), (8960, 9215, 'Miscellaneous Technical'),", "start, end, name in blocks: if start <= cp <= end: return name", "'Old Hungarian'), (68864, 68927, '<NAME>'), (69216, 69247, 'Rumi Numeral Symbols'), (69376, 69423, 'Old", "'Basic Latin'), (128, 255, 'Latin-1 Supplement'), (256, 383, 'Latin Extended-A'), (384, 591, 'Latin", "Disc'), (66176, 66207, 'Lycian'), (66208, 66271, 'Carian'), (66272, 66303, 'Coptic Epact Numbers'), (66304,", "(67872, 67903, 'Lydian'), (67968, 67999, 'Meroitic Hieroglyphs'), (68000, 68095, 'Meroitic Cursive'), (68096, 68191,", "'Tai Xuan Jing Symbols'), (119648, 119679, 'Counting Rod Numerals'), (119808, 120831, 'Mathematical Alphanumeric", "12543, 'Katakana'), (12544, 12591, 'Bopomofo'), (12592, 12687, 'Hangul Compatibility Jamo'), (12688, 12703, 'Kanbun'),", "Symbols and Arrows'), (11264, 11359, 'Glagolitic'), (11360, 11391, 'Latin Extended-C'), (11392, 11519, 'Coptic'),", "'Tifinagh'), (11648, 11743, 'Ethiopic Extended'), (11744, 11775, 'Cyrillic Extended-A'), (11776, 11903, 'Supplemental Punctuation'),", "(43392, 43487, 'Javanese'), (43488, 43519, 'Myanmar Extended-B'), (43520, 43615, 'Cham'), (43616, 43647, 'Myanmar", "name for character, or None if character has no block. 
from https://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python :param", "68687, 'Old Turkic'), (68736, 68863, 'Old Hungarian'), (68864, 68927, '<NAME>'), (69216, 69247, 'Rumi", "43071, 'Common Indic Number Forms'), (43072, 43135, 'Phags-pa'), (43136, 43231, 'Saurashtra'), (43232, 43263,", "(74880, 75087, 'Early Dynastic Cuneiform'), (77824, 78895, 'Egyptian Hieroglyphs'), (78896, 78911, 'Egyptian Hieroglyph", "1, repr(character) cp = ord(character) for start, end, name in blocks: if start", "42191, 'Yi Radicals'), (42192, 42239, 'Lisu'), (42240, 42559, 'Vai'), (42560, 42655, 'Cyrillic Extended-B'),", "(7552, 7615, 'Phonetic Extensions Supplement'), (7616, 7679, 'Combining Diacritical Marks Supplement'), (7680, 7935,", "'Lao'), (3840, 4095, 'Tibetan'), (4096, 4255, 'Myanmar'), (4256, 4351, 'Georgian'), (4352, 4607, 'Hang<NAME>'),", "name in blocks: if start <= cp <= end: return name blocks =", "72815, 'Bhaiksuki'), (72816, 72895, 'Marchen'), (72960, 73055, '<NAME>'), (73056, 73135, '<NAME>'), (73440, 73471,", "(12736, 12783, 'CJK Strokes'), (12784, 12799, 'Katakana Phonetic Extensions'), (12800, 13055, 'Enclosed CJK", "43743, 'Tai Viet'), (43744, 43775, 'Meetei Mayek Extensions'), (43776, 43823, 'Ethiopic Extended-A'), (43824,", "Viet'), (43744, 43775, 'Meetei Mayek Extensions'), (43776, 43823, 'Ethiopic Extended-A'), (43824, 43887, 'Latin", "Supplement'), (12032, 12255, 'Kangxi Radicals'), (12272, 12287, 'Ideographic Description Characters'), (12288, 12351, 'CJK", "68223, 'Old South Arabian'), (68224, 68255, 'Old North Arabian'), (68288, 68351, 'Manichaean'), (68352,", "'Ancient Greek Numbers'), (65936, 65999, 'Ancient Symbols'), (66000, 66047, 'Phaistos Disc'), (66176, 66207,", "'CJK Unified Ideographs'), (40960, 42127, 'Yi Syllables'), (42128, 42191, 'Yi Radicals'), (42192, 42239,", "Permic'), (66432, 66463, 'Ugaritic'), (66464, 66527, 'Old Persian'), (66560, 66639, 'Deseret'), (66640, 66687,", "'Miscellaneous Symbols and Arrows'), (11264, 
11359, 'Glagolitic'), (11360, 11391, 'Latin Extended-C'), (11392, 11519,", "'Manichaean'), (68352, 68415, 'Avestan'), (68416, 68447, 'Inscriptional Parthian'), (68448, 68479, 'Inscriptional Pahlavi'), (68480,", "917999, 'Variation Selectors Supplement'), (983040, 1048575, 'Supplementary Private Use Area-A'), (1048576, 1114111, 'Supplementary", "8527, 'Letterlike Symbols'), (8528, 8591, 'Number Forms'), (8592, 8703, 'Arrows'), (8704, 8959, 'Mathematical", "SignWriting'), (122880, 122927, 'Glagolitic Supplement'), (123136, 123215, '<NAME>'), (123584, 123647, 'Wancho'), (124928, 125151,", "Recognition'), (9312, 9471, 'Enclosed Alphanumerics'), (9472, 9599, 'Box Drawing'), (9600, 9631, 'Block Elements'),", "Extension F'), (194560, 195103, 'CJK Compatibility Ideographs Supplement'), (195104, 917503, 'No Unicode Block'),", "69631, 'Elymaic'), (69632, 69759, 'Brahmi'), (69760, 69839, 'Kaithi'), (69840, 69887, '<NAME>'), (69888, 69967,", "'Ethiopic Extended'), (11744, 11775, 'Cyrillic Extended-A'), (11776, 11903, 'Supplemental Punctuation'), (11904, 12031, 'CJK", "66687, 'Shavian'), (66688, 66735, 'Osmanya'), (66736, 66815, 'Osage'), (66816, 66863, 'Elbasan'), (66864, 66927,", "'Latin Extended-C'), (11392, 11519, 'Coptic'), (11520, 11567, 'Georgian Supplement'), (11568, 11647, 'Tifinagh'), (11648,", "Forms'), (43072, 43135, 'Phags-pa'), (43136, 43231, 'Saurashtra'), (43232, 43263, 'Devanagari Extended'), (43264, 43311,", "Arabian'), (68224, 68255, 'Old North Arabian'), (68288, 68351, 'Manichaean'), (68352, 68415, 'Avestan'), (68416,", "2943, 'Oriya'), (2944, 3071, 'Tamil'), (3072, 3199, 'Telugu'), (3200, 3327, 'Kannada'), (3328, 3455,", "68527, '<NAME>'), (68608, 68687, 'Old Turkic'), (68736, 68863, 'Old Hungarian'), (68864, 68927, '<NAME>'),", "'Yijing Hexagram Symbols'), (19968, 40959, 'CJK Unified Ideographs'), (40960, 42127, 'Yi Syllables'), (42128,", "<= cp <= end: return name blocks = [(0, 127, 'Basic Latin'), (128,", "'Makasar'), (73664, 73727, 'Tamil Supplement'), 
(73728, 74751, 'Cuneiform'), (74752, 74879, 'Cuneiform Numbers and", "(72192, 72271, '<NAME>'), (72272, 72367, 'Soyombo'), (72384, 72447, '<NAME>'), (72704, 72815, 'Bhaiksuki'), (72816,", "'Ancient Symbols'), (66000, 66047, 'Phaistos Disc'), (66176, 66207, 'Lycian'), (66208, 66271, 'Carian'), (66272,", "(93952, 94111, 'Miao'), (94176, 94207, 'Ideographic Symbols and Punctuation'), (94208, 100351, 'Tangut'), (100352,", "(110592, 110847, 'Kana Supplement'), (110848, 110895, 'Kana Extended-A'), (110896, 110959, 'Small Kana Extension'),", "'Devanagari Extended'), (43264, 43311, '<NAME>'), (43312, 43359, 'Rejang'), (43360, 43391, 'Hangul Jamo Extended-A'),", "178207, 'CJK Unified Ideographs Extension D'), (178208, 183983, 'CJK Unified Ideographs Extension E'),", "(129648, 129791, 'Symbols and Pictographs Extended-A'), (131072, 173791, 'CJK Unified Ideographs Extension B'),", "(194560, 195103, 'CJK Compatibility Ideographs Supplement'), (195104, 917503, 'No Unicode Block'), (917504, 917631,", "Symbols and Pictographs'), (128512, 128591, 'Emoticons'), (128592, 128639, 'Ornamental Dingbats'), (128640, 128767, 'Transport", "Symbols'), (8528, 8591, 'Number Forms'), (8592, 8703, 'Arrows'), (8704, 8959, 'Mathematical Operators'), (8960,", "name blocks = [(0, 127, 'Basic Latin'), (128, 255, 'Latin-1 Supplement'), (256, 383,", "cp <= end: return name blocks = [(0, 127, 'Basic Latin'), (128, 255,", "'Ethiopic Extended-A'), (43824, 43887, 'Latin Extended-E'), (43888, 43967, 'Cherokee Supplement'), (43968, 44031, '<NAME>'),", "Ideographic Supplement'), (127744, 128511, 'Miscellaneous Symbols and Pictographs'), (128512, 128591, 'Emoticons'), (128592, 128639,", "83583, 'Anatolian Hieroglyphs'), (92160, 92735, 'Bamum Supplement'), (92736, 92783, 'Mro'), (92880, 92927, '<NAME>'),", "'Miscellaneous Technical'), (9216, 9279, 'Control Pictures'), (9280, 9311, 'Optical Character Recognition'), (9312, 9471,", "9311, 'Optical Character Recognition'), (9312, 9471, 'Enclosed Alphanumerics'), 
(9472, 9599, 'Box Drawing'), (9600,", "66527, 'Old Persian'), (66560, 66639, 'Deseret'), (66640, 66687, 'Shavian'), (66688, 66735, 'Osmanya'), (66736,", "from https://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python :param character\"\"\" assert isinstance(character, str) and len(character) == 1, repr(character) cp", "(2816, 2943, 'Oriya'), (2944, 3071, 'Tamil'), (3072, 3199, 'Telugu'), (3200, 3327, 'Kannada'), (3328,", "65039, 'Variation Selectors'), (65040, 65055, 'Vertical Forms'), (65056, 65071, 'Combining Half Marks'), (65072,", "(173824, 177983, 'CJK Unified Ideographs Extension C'), (177984, 178207, 'CJK Unified Ideographs Extension", "Supplement'), (983040, 1048575, 'Supplementary Private Use Area-A'), (1048576, 1114111, 'Supplementary Private Use Area-B')]", "'Kanbun'), (12704, 12735, 'Bopomofo Extended'), (12736, 12783, 'CJK Strokes'), (12784, 12799, 'Katakana Phonetic", "(65856, 65935, 'Ancient Greek Numbers'), (65936, 65999, 'Ancient Symbols'), (66000, 66047, 'Phaistos Disc'),", "Supplement'), (7376, 7423, 'Vedic Extensions'), (7424, 7551, 'Phonetic Extensions'), (7552, 7615, 'Phonetic Extensions", "'Myanmar Extended-A'), (43648, 43743, 'Tai Viet'), (43744, 43775, 'Meetei Mayek Extensions'), (43776, 43823,", "Extensions'), (43776, 43823, 'Ethiopic Extended-A'), (43824, 43887, 'Latin Extended-E'), (43888, 43967, 'Cherokee Supplement'),", "'Mahajani'), (70016, 70111, 'Sharada'), (70112, 70143, 'Sinhala Archaic Numbers'), (70144, 70223, 'Khojki'), (70272,", "(19968, 40959, 'CJK Unified Ideographs'), (40960, 42127, 'Yi Syllables'), (42128, 42191, 'Yi Radicals'),", "(43056, 43071, 'Common Indic Number Forms'), (43072, 43135, 'Phags-pa'), (43136, 43231, 'Saurashtra'), (43232,", "Supplement'), (5024, 5119, 'Cherokee'), (5120, 5759, 'Unified Canadian Aboriginal Syllabics'), (5760, 5791, 'Ogham'),", "(5792, 5887, 'Runic'), (5888, 5919, 'Tagalog'), (5920, 5951, 'Hanunoo'), (5952, 5983, 'Buhid'), (5984,", "110895, 'Kana Extended-A'), 
(110896, 110959, 'Small Kana Extension'), (110960, 111359, 'Nushu'), (113664, 113823,", "56191, 'High Surrogates'), (56192, 56319, 'High Private Use Surrogates'), (56320, 57343, 'Low Surrogates'),", "(5760, 5791, 'Ogham'), (5792, 5887, 'Runic'), (5888, 5919, 'Tagalog'), (5920, 5951, 'Hanunoo'), (5952,", ":param character\"\"\" assert isinstance(character, str) and len(character) == 1, repr(character) cp = ord(character)", "'Dogra'), (71840, 71935, '<NAME>'), (72096, 72191, 'Nandinagari'), (72192, 72271, '<NAME>'), (72272, 72367, 'Soyombo'),", "183983, 'CJK Unified Ideographs Extension E'), (183984, 191471, 'CJK Unified Ideographs Extension F'),", "(67808, 67839, 'Hatran'), (67840, 67871, 'Phoenician'), (67872, 67903, 'Lydian'), (67968, 67999, 'Meroitic Hieroglyphs'),", "(6480, 6527, 'Tai Le'), (6528, 6623, 'New Tai Lue'), (6624, 6655, 'Khmer Symbols'),", "(66560, 66639, 'Deseret'), (66640, 66687, 'Shavian'), (66688, 66735, 'Osmanya'), (66736, 66815, 'Osage'), (66816,", "'CJK Compatibility Forms'), (65104, 65135, 'Small Form Variants'), (65136, 65279, 'Arabic Presentation Forms-B'),", "'Combining Diacritical Marks for Symbols'), (8448, 8527, 'Letterlike Symbols'), (8528, 8591, 'Number Forms'),", "'Hiragana'), (12448, 12543, 'Katakana'), (12544, 12591, 'Bopomofo'), (12592, 12687, 'Hangul Compatibility Jamo'), (12688,", "and Punctuation'), (74880, 75087, 'Early Dynastic Cuneiform'), (77824, 78895, 'Egyptian Hieroglyphs'), (78896, 78911,", "43055, 'Syloti Nagri'), (43056, 43071, 'Common Indic Number Forms'), (43072, 43135, 'Phags-pa'), (43136,", "Punctuation'), (12352, 12447, 'Hiragana'), (12448, 12543, 'Katakana'), (12544, 12591, 'Bopomofo'), (12592, 12687, 'Hangul", "'Marchen'), (72960, 73055, '<NAME>'), (73056, 73135, '<NAME>'), (73440, 73471, 'Makasar'), (73664, 73727, 'Tamil", "'Myanmar Extended-B'), (43520, 43615, 'Cham'), (43616, 43647, 'Myanmar Extended-A'), (43648, 43743, 'Tai Viet'),", "Ideographs Supplement'), (195104, 917503, 'No Unicode Block'), (917504, 
917631, 'Tags'), (917760, 917999, 'Variation", "'Bhaiksuki'), (72816, 72895, 'Marchen'), (72960, 73055, '<NAME>'), (73056, 73135, '<NAME>'), (73440, 73471, 'Makasar'),", "Surrogates'), (56192, 56319, 'High Private Use Surrogates'), (56320, 57343, 'Low Surrogates'), (57344, 63743,", "(73664, 73727, 'Tamil Supplement'), (73728, 74751, 'Cuneiform'), (74752, 74879, 'Cuneiform Numbers and Punctuation'),", "(68480, 68527, '<NAME>'), (68608, 68687, 'Old Turkic'), (68736, 68863, 'Old Hungarian'), (68864, 68927,", "(93760, 93855, 'Medefaidrin'), (93952, 94111, 'Miao'), (94176, 94207, 'Ideographic Symbols and Punctuation'), (94208,", "Extension E'), (183984, 191471, 'CJK Unified Ideographs Extension F'), (194560, 195103, 'CJK Compatibility", "2159, 'Syriac Supplement'), (2208, 2303, 'Arabic Extended-A'), (2304, 2431, 'Devanagari'), (2432, 2559, 'Bengali'),", "(68288, 68351, 'Manichaean'), (68352, 68415, 'Avestan'), (68416, 68447, 'Inscriptional Parthian'), (68448, 68479, 'Inscriptional", "'Tamil'), (3072, 3199, 'Telugu'), (3200, 3327, 'Kannada'), (3328, 3455, 'Malayalam'), (3456, 3583, 'Sinhala'),", "9983, 'Miscellaneous Symbols'), (9984, 10175, 'Dingbats'), (10176, 10223, 'Miscellaneous Mathematical Symbols-A'), (10224, 10239,", "65055, 'Vertical Forms'), (65056, 65071, 'Combining Half Marks'), (65072, 65103, 'CJK Compatibility Forms'),", "Unified Ideographs Extension F'), (194560, 195103, 'CJK Compatibility Ideographs Supplement'), (195104, 917503, 'No", "(71264, 71295, 'Mongolian Supplement'), (71296, 71375, 'Takri'), (71424, 71487, 'Ahom'), (71680, 71759, 'Dogra'),", "Operators'), (8960, 9215, 'Miscellaneous Technical'), (9216, 9279, 'Control Pictures'), (9280, 9311, 'Optical Character", "'Mongolian Supplement'), (71296, 71375, 'Takri'), (71424, 71487, 'Ahom'), (71680, 71759, 'Dogra'), (71840, 71935,", "(12704, 12735, 'Bopomofo Extended'), (12736, 12783, 'CJK Strokes'), (12784, 12799, 'Katakana Phonetic Extensions'),", "78911, 'Egyptian Hieroglyph Format Controls'), 
(82944, 83583, 'Anatolian Hieroglyphs'), (92160, 92735, 'Bamum Supplement'),", "Forms'), (65520, 65535, 'Specials'), (65536, 65663, 'Linear B Syllabary'), (65664, 65791, 'Linear B", "(66208, 66271, 'Carian'), (66272, 66303, 'Coptic Epact Numbers'), (66304, 66351, 'Old Italic'), (66352,", "Letters'), (42784, 43007, 'Latin Extended-D'), (43008, 43055, 'Syloti Nagri'), (43056, 43071, 'Common Indic", "C'), (177984, 178207, 'CJK Unified Ideographs Extension D'), (178208, 183983, 'CJK Unified Ideographs", "D'), (178208, 183983, 'CJK Unified Ideographs Extension E'), (183984, 191471, 'CJK Unified Ideographs", "no block. from https://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python :param character\"\"\" assert isinstance(character, str) and len(character) == 1,", "42783, 'Modifier Tone Letters'), (42784, 43007, 'Latin Extended-D'), (43008, 43055, 'Syloti Nagri'), (43056,", "'CJK Symbols and Punctuation'), (12352, 12447, 'Hiragana'), (12448, 12543, 'Katakana'), (12544, 12591, 'Bopomofo'),", "Extended-A'), (110896, 110959, 'Small Kana Extension'), (110960, 111359, 'Nushu'), (113664, 113823, 'Duployan'), (113824,", "(43008, 43055, 'Syloti Nagri'), (43056, 43071, 'Common Indic Number Forms'), (43072, 43135, 'Phags-pa'),", "119551, 'Mayan Numerals'), (119552, 119647, 'Tai Xuan Jing Symbols'), (119648, 119679, 'Counting Rod", "(63744, 64255, 'CJK Compatibility Ideographs'), (64256, 64335, 'Alphabetic Presentation Forms'), (64336, 65023, 'Arabic", "Syllables'), (42128, 42191, 'Yi Radicals'), (42192, 42239, 'Lisu'), (42240, 42559, 'Vai'), (42560, 42655,", "'Ottoman Siyaq Numbers'), (126464, 126719, 'Arabic Mathematical Alphabetic Symbols'), (126976, 127023, 'Mahjong Tiles'),", "3839, 'Lao'), (3840, 4095, 'Tibetan'), (4096, 4255, 'Myanmar'), (4256, 4351, 'Georgian'), (4352, 4607,", "'Lydian'), (67968, 67999, 'Meroitic Hieroglyphs'), (68000, 68095, 'Meroitic Cursive'), (68096, 68191, 'Kharoshthi'), (68192,", "Symbols'), (119648, 119679, 'Counting Rod 
Numerals'), (119808, 120831, 'Mathematical Alphanumeric Symbols'), (120832, 121519,", "(92928, 93071, '<NAME>'), (93760, 93855, 'Medefaidrin'), (93952, 94111, 'Miao'), (94176, 94207, 'Ideographic Symbols", "(10624, 10751, 'Miscellaneous Mathematical Symbols-B'), (10752, 11007, 'Supplemental Mathematical Operators'), (11008, 11263, 'Miscellaneous", "'Supplemental Punctuation'), (11904, 12031, 'CJK Radicals Supplement'), (12032, 12255, 'Kangxi Radicals'), (12272, 12287,", "Unified Ideographs Extension E'), (183984, 191471, 'CJK Unified Ideographs Extension F'), (194560, 195103,", "'Khmer Symbols'), (6656, 6687, 'Buginese'), (6688, 6831, 'Tai Tham'), (6832, 6911, 'Combining Diacritical", "12447, 'Hiragana'), (12448, 12543, 'Katakana'), (12544, 12591, 'Bopomofo'), (12592, 12687, 'Hangul Compatibility Jamo'),", "(4608, 4991, 'Ethiopic'), (4992, 5023, 'Ethiopic Supplement'), (5024, 5119, 'Cherokee'), (5120, 5759, 'Unified", "'CJK Radicals Supplement'), (12032, 12255, 'Kangxi Radicals'), (12272, 12287, 'Ideographic Description Characters'), (12288,", "Symbols and Pictographs'), (129536, 129647, 'Chess Symbols'), (129648, 129791, 'Symbols and Pictographs Extended-A'),", "(92160, 92735, 'Bamum Supplement'), (92736, 92783, 'Mro'), (92880, 92927, '<NAME>'), (92928, 93071, '<NAME>'),", "Extended'), (43264, 43311, '<NAME>'), (43312, 43359, 'Rejang'), (43360, 43391, 'Hangul Jamo Extended-A'), (43392,", "111359, 'Nushu'), (113664, 113823, 'Duployan'), (113824, 113839, 'Shorthand Format Controls'), (118784, 119039, 'Byzantine", "383, 'Latin Extended-A'), (384, 591, 'Latin Extended-B'), (592, 687, 'IPA Extensions'), (688, 767,", "2111, 'Samaritan'), (2112, 2143, 'Mandaic'), (2144, 2159, 'Syriac Supplement'), (2208, 2303, 'Arabic Extended-A'),", "(42128, 42191, 'Yi Radicals'), (42192, 42239, 'Lisu'), (42240, 42559, 'Vai'), (42560, 42655, 'Cyrillic", "(11776, 11903, 'Supplemental Punctuation'), (11904, 12031, 'CJK Radicals Supplement'), (12032, 12255, 'Kangxi Radicals'),", 
"'<NAME>'), (69216, 69247, 'Rumi Numeral Symbols'), (69376, 69423, 'Old Sogdian'), (69424, 69487, 'Sogdian'),", "'Enclosed Alphanumerics'), (9472, 9599, 'Box Drawing'), (9600, 9631, 'Block Elements'), (9632, 9727, 'Geometric", "(126208, 126287, 'Ottoman Siyaq Numbers'), (126464, 126719, 'Arabic Mathematical Alphabetic Symbols'), (126976, 127023,", "Pictographs Extended-A'), (131072, 173791, 'CJK Unified Ideographs Extension B'), (173824, 177983, 'CJK Unified", "and Arrows'), (11264, 11359, 'Glagolitic'), (11360, 11391, 'Latin Extended-C'), (11392, 11519, 'Coptic'), (11520,", "'<NAME>'), (43312, 43359, 'Rejang'), (43360, 43391, 'Hangul Jamo Extended-A'), (43392, 43487, 'Javanese'), (43488,", "(128512, 128591, 'Emoticons'), (128592, 128639, 'Ornamental Dingbats'), (128640, 128767, 'Transport and Map Symbols'),", "'Glagolitic'), (11360, 11391, 'Latin Extended-C'), (11392, 11519, 'Coptic'), (11520, 11567, 'Georgian Supplement'), (11568,", "92927, '<NAME>'), (92928, 93071, '<NAME>'), (93760, 93855, 'Medefaidrin'), (93952, 94111, 'Miao'), (94176, 94207,", "(12688, 12703, 'Kanbun'), (12704, 12735, 'Bopomofo Extended'), (12736, 12783, 'CJK Strokes'), (12784, 12799,", "93855, 'Medefaidrin'), (93952, 94111, 'Miao'), (94176, 94207, 'Ideographic Symbols and Punctuation'), (94208, 100351,", "(2560, 2687, 'Gurmukhi'), (2688, 2815, 'Gujarati'), (2816, 2943, 'Oriya'), (2944, 3071, 'Tamil'), (3072,", "(69840, 69887, '<NAME>'), (69888, 69967, 'Chakma'), (69968, 70015, 'Mahajani'), (70016, 70111, 'Sharada'), (70112,", "(10240, 10495, 'Braille Patterns'), (10496, 10623, 'Supplemental Arrows-B'), (10624, 10751, 'Miscellaneous Mathematical Symbols-B'),", "Unified Ideographs Extension D'), (178208, 183983, 'CJK Unified Ideographs Extension E'), (183984, 191471,", "43007, 'Latin Extended-D'), (43008, 43055, 'Syloti Nagri'), (43056, 43071, 'Common Indic Number Forms'),", "'Cyrillic Extended-B'), (42656, 42751, 'Bamum'), (42752, 42783, 'Modifier Tone Letters'), (42784, 43007, 'Latin", 
"(127136, 127231, 'Playing Cards'), (127232, 127487, 'Enclosed Alphanumeric Supplement'), (127488, 127743, 'Enclosed Ideographic", "'<NAME>'), (44032, 55215, 'Hangul Syllables'), (55216, 55295, 'Hangul Jamo Extended-B'), (55296, 56191, 'High", "5119, 'Cherokee'), (5120, 5759, 'Unified Canadian Aboriginal Syllabics'), (5760, 5791, 'Ogham'), (5792, 5887,", "70783, 'Newa'), (70784, 70879, 'Tirhuta'), (71040, 71167, 'Siddham'), (71168, 71263, 'Modi'), (71264, 71295,", "12735, 'Bopomofo Extended'), (12736, 12783, 'CJK Strokes'), (12784, 12799, 'Katakana Phonetic Extensions'), (12800,", "6831, 'Tai Tham'), (6832, 6911, 'Combining Diacritical Marks Extended'), (6912, 7039, 'Balinese'), (7040,", "10223, 'Miscellaneous Mathematical Symbols-A'), (10224, 10239, 'Supplemental Arrows-A'), (10240, 10495, 'Braille Patterns'), (10496,", "'Bamum Supplement'), (92736, 92783, 'Mro'), (92880, 92927, '<NAME>'), (92928, 93071, '<NAME>'), (93760, 93855,", "'Enclosed CJK Letters and Months'), (13056, 13311, 'CJK Compatibility'), (13312, 19903, 'CJK Unified", "73055, '<NAME>'), (73056, 73135, '<NAME>'), (73440, 73471, 'Makasar'), (73664, 73727, 'Tamil Supplement'), (73728,", "'Arabic Extended-A'), (2304, 2431, 'Devanagari'), (2432, 2559, 'Bengali'), (2560, 2687, 'Gurmukhi'), (2688, 2815,", "19967, 'Yijing Hexagram Symbols'), (19968, 40959, 'CJK Unified Ideographs'), (40960, 42127, 'Yi Syllables'),", "13055, 'Enclosed CJK Letters and Months'), (13056, 13311, 'CJK Compatibility'), (13312, 19903, 'CJK", "'Modi'), (71264, 71295, 'Mongolian Supplement'), (71296, 71375, 'Takri'), (71424, 71487, 'Ahom'), (71680, 71759,", "Extension C'), (177984, 178207, 'CJK Unified Ideographs Extension D'), (178208, 183983, 'CJK Unified", "(71680, 71759, 'Dogra'), (71840, 71935, '<NAME>'), (72096, 72191, 'Nandinagari'), (72192, 72271, '<NAME>'), (72272,", "Aboriginal Syllabics Extended'), (6400, 6479, 'Limbu'), (6480, 6527, 'Tai Le'), (6528, 6623, 'New", "Tai Lue'), (6624, 6655, 'Khmer Symbols'), (6656, 6687, 
'Buginese'), (6688, 6831, 'Tai Tham'),", "(9216, 9279, 'Control Pictures'), (9280, 9311, 'Optical Character Recognition'), (9312, 9471, 'Enclosed Alphanumerics'),", "11007, 'Supplemental Mathematical Operators'), (11008, 11263, 'Miscellaneous Symbols and Arrows'), (11264, 11359, 'Glagolitic'),", "== 1, repr(character) cp = ord(character) for start, end, name in blocks: if", "70015, 'Mahajani'), (70016, 70111, 'Sharada'), (70112, 70143, 'Sinhala Archaic Numbers'), (70144, 70223, 'Khojki'),", "65663, 'Linear B Syllabary'), (65664, 65791, 'Linear B Ideograms'), (65792, 65855, 'Aegean Numbers'),", "(183984, 191471, 'CJK Unified Ideographs Extension F'), (194560, 195103, 'CJK Compatibility Ideographs Supplement'),", "65279, 'Arabic Presentation Forms-B'), (65280, 65519, 'Halfwidth and Fullwidth Forms'), (65520, 65535, 'Specials'),", "Extended-D'), (43008, 43055, 'Syloti Nagri'), (43056, 43071, 'Common Indic Number Forms'), (43072, 43135,", "195103, 'CJK Compatibility Ideographs Supplement'), (195104, 917503, 'No Unicode Block'), (917504, 917631, 'Tags'),", "Extensions'), (12800, 13055, 'Enclosed CJK Letters and Months'), (13056, 13311, 'CJK Compatibility'), (13312,", "'Letterlike Symbols'), (8528, 8591, 'Number Forms'), (8592, 8703, 'Arrows'), (8704, 8959, 'Mathematical Operators'),", "Syllables'), (55216, 55295, 'Hangul Jamo Extended-B'), (55296, 56191, 'High Surrogates'), (56192, 56319, 'High", "(128768, 128895, 'Alchemical Symbols'), (128896, 129023, 'Geometric Shapes Extended'), (129024, 129279, 'Supplemental Arrows-C'),", "(65040, 65055, 'Vertical Forms'), (65056, 65071, 'Combining Half Marks'), (65072, 65103, 'CJK Compatibility", "'<NAME>'), (72096, 72191, 'Nandinagari'), (72192, 72271, '<NAME>'), (72272, 72367, 'Soyombo'), (72384, 72447, '<NAME>'),", "126719, 'Arabic Mathematical Alphabetic Symbols'), (126976, 127023, 'Mahjong Tiles'), (127024, 127135, 'Domino Tiles'),", "Extended-C'), (11392, 11519, 'Coptic'), (11520, 11567, 'Georgian Supplement'), (11568, 
11647, 'Tifinagh'), (11648, 11743,", "43359, 'Rejang'), (43360, 43391, 'Hangul Jamo Extended-A'), (43392, 43487, 'Javanese'), (43488, 43519, 'Myanmar", "ord(character) for start, end, name in blocks: if start <= cp <= end:", "Symbols and Punctuation'), (94208, 100351, 'Tangut'), (100352, 101119, 'Tangut Components'), (110592, 110847, 'Kana", "Archaic Numbers'), (70144, 70223, 'Khojki'), (70272, 70319, 'Multani'), (70320, 70399, 'Khudawadi'), (70400, 70527,", "character has no block. from https://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python :param character\"\"\" assert isinstance(character, str) and len(character)", "(65104, 65135, 'Small Form Variants'), (65136, 65279, 'Arabic Presentation Forms-B'), (65280, 65519, 'Halfwidth", "Supplement'), (123136, 123215, '<NAME>'), (123584, 123647, 'Wancho'), (124928, 125151, '<NAME>'), (125184, 125279, 'Adlam'),", "Symbols'), (69376, 69423, 'Old Sogdian'), (69424, 69487, 'Sogdian'), (69600, 69631, 'Elymaic'), (69632, 69759,", "65791, 'Linear B Ideograms'), (65792, 65855, 'Aegean Numbers'), (65856, 65935, 'Ancient Greek Numbers'),", "'Old Persian'), (66560, 66639, 'Deseret'), (66640, 66687, 'Shavian'), (66688, 66735, 'Osmanya'), (66736, 66815,", "'Ethiopic Supplement'), (5024, 5119, 'Cherokee'), (5120, 5759, 'Unified Canadian Aboriginal Syllabics'), (5760, 5791,", "(68192, 68223, 'Old South Arabian'), (68224, 68255, 'Old North Arabian'), (68288, 68351, 'Manichaean'),", "123647, 'Wancho'), (124928, 125151, '<NAME>'), (125184, 125279, 'Adlam'), (126064, 126143, 'Indic Siyaq Numbers'),", "(917504, 917631, 'Tags'), (917760, 917999, 'Variation Selectors Supplement'), (983040, 1048575, 'Supplementary Private Use", "(11360, 11391, 'Latin Extended-C'), (11392, 11519, 'Coptic'), (11520, 11567, 'Georgian Supplement'), (11568, 11647,", "'Superscripts and Subscripts'), (8352, 8399, 'Currency Symbols'), (8400, 8447, 'Combining Diacritical Marks for", "and Punctuation'), (94208, 100351, 'Tangut'), (100352, 
101119, 'Tangut Components'), (110592, 110847, 'Kana Supplement'),", "8399, 'Currency Symbols'), (8400, 8447, 'Combining Diacritical Marks for Symbols'), (8448, 8527, 'Letterlike", "(7104, 7167, 'Batak'), (7168, 7247, 'Lepcha'), (7248, 7295, 'Ol Chiki'), (7296, 7311, 'Cyrillic", "(256, 383, 'Latin Extended-A'), (384, 591, 'Latin Extended-B'), (592, 687, 'IPA Extensions'), (688,", "70223, 'Khojki'), (70272, 70319, 'Multani'), (70320, 70399, 'Khudawadi'), (70400, 70527, 'Grantha'), (70656, 70783,", "12591, 'Bopomofo'), (12592, 12687, 'Hangul Compatibility Jamo'), (12688, 12703, 'Kanbun'), (12704, 12735, 'Bopomofo", "Alphanumeric Supplement'), (127488, 127743, 'Enclosed Ideographic Supplement'), (127744, 128511, 'Miscellaneous Symbols and Pictographs'),", "'Myanmar'), (4256, 4351, 'Georgian'), (4352, 4607, 'Hang<NAME>'), (4608, 4991, 'Ethiopic'), (4992, 5023, 'Ethiopic", "43391, 'Hangul Jamo Extended-A'), (43392, 43487, 'Javanese'), (43488, 43519, 'Myanmar Extended-B'), (43520, 43615,", "'Small Kana Extension'), (110960, 111359, 'Nushu'), (113664, 113823, 'Duployan'), (113824, 113839, 'Shorthand Format", "69423, 'Old Sogdian'), (69424, 69487, 'Sogdian'), (69600, 69631, 'Elymaic'), (69632, 69759, 'Brahmi'), (69760,", "(67584, 67647, 'Cypriot Syllabary'), (67648, 67679, 'Imperial Aramaic'), (67680, 67711, 'Palmyrene'), (67712, 67759,", "Extended-E'), (43888, 43967, 'Cherokee Supplement'), (43968, 44031, '<NAME>'), (44032, 55215, 'Hangul Syllables'), (55216,", "'Old North Arabian'), (68288, 68351, 'Manichaean'), (68352, 68415, 'Avestan'), (68416, 68447, 'Inscriptional Parthian'),", "(66736, 66815, 'Osage'), (66816, 66863, 'Elbasan'), (66864, 66927, 'Caucasian Albanian'), (67072, 67455, 'Linear", "Phonetic Extensions'), (12800, 13055, 'Enclosed CJK Letters and Months'), (13056, 13311, 'CJK Compatibility'),", "and Months'), (13056, 13311, 'CJK Compatibility'), (13312, 19903, 'CJK Unified Ideographs Extension A'),", "(1920, 1983, 'Thaana'), (1984, 2047, 'NKo'), (2048, 2111, 
'Samaritan'), (2112, 2143, 'Mandaic'), (2144,", "65519, 'Halfwidth and Fullwidth Forms'), (65520, 65535, 'Specials'), (65536, 65663, 'Linear B Syllabary'),", "(592, 687, 'IPA Extensions'), (688, 767, 'Spacing Modifier Letters'), (768, 879, 'Combining Diacritical", "Unified Ideographs Extension A'), (19904, 19967, 'Yijing Hexagram Symbols'), (19968, 40959, 'CJK Unified", "1327, 'Cyrillic Supplement'), (1328, 1423, 'Armenian'), (1424, 1535, 'Hebrew'), (1536, 1791, 'Arabic'), (1792,", "'Khudawadi'), (70400, 70527, 'Grantha'), (70656, 70783, 'Newa'), (70784, 70879, 'Tirhuta'), (71040, 71167, 'Siddham'),", "'Chakma'), (69968, 70015, 'Mahajani'), (70016, 70111, 'Sharada'), (70112, 70143, 'Sinhala Archaic Numbers'), (70144,", "93071, '<NAME>'), (93760, 93855, 'Medefaidrin'), (93952, 94111, 'Miao'), (94176, 94207, 'Ideographic Symbols and", "7167, 'Batak'), (7168, 7247, 'Lepcha'), (7248, 7295, 'Ol Chiki'), (7296, 7311, 'Cyrillic Extended-C'),", "'Cuneiform'), (74752, 74879, 'Cuneiform Numbers and Punctuation'), (74880, 75087, 'Early Dynastic Cuneiform'), (77824,", "Pictographs'), (129536, 129647, 'Chess Symbols'), (129648, 129791, 'Symbols and Pictographs Extended-A'), (131072, 173791,", "Nagri'), (43056, 43071, 'Common Indic Number Forms'), (43072, 43135, 'Phags-pa'), (43136, 43231, 'Saurashtra'),", "43823, 'Ethiopic Extended-A'), (43824, 43887, 'Latin Extended-E'), (43888, 43967, 'Cherokee Supplement'), (43968, 44031,", "(3328, 3455, 'Malayalam'), (3456, 3583, 'Sinhala'), (3584, 3711, 'Thai'), (3712, 3839, 'Lao'), (3840,", "(66176, 66207, 'Lycian'), (66208, 66271, 'Carian'), (66272, 66303, 'Coptic Epact Numbers'), (66304, 66351,", "'Linear A'), (67584, 67647, 'Cypriot Syllabary'), (67648, 67679, 'Imperial Aramaic'), (67680, 67711, 'Palmyrene'),", "'Spacing Modifier Letters'), (768, 879, 'Combining Diacritical Marks'), (880, 1023, 'Greek and Coptic'),", "69487, 'Sogdian'), (69600, 69631, 'Elymaic'), (69632, 69759, 'Brahmi'), (69760, 69839, 'Kaithi'), (69840, 69887,", 
"'Syriac'), (1872, 1919, 'Arabic Supplement'), (1920, 1983, 'Thaana'), (1984, 2047, 'NKo'), (2048, 2111,", "Symbols'), (6656, 6687, 'Buginese'), (6688, 6831, 'Tai Tham'), (6832, 6911, 'Combining Diacritical Marks", "'Ethiopic'), (4992, 5023, 'Ethiopic Supplement'), (5024, 5119, 'Cherokee'), (5120, 5759, 'Unified Canadian Aboriginal", "F'), (194560, 195103, 'CJK Compatibility Ideographs Supplement'), (195104, 917503, 'No Unicode Block'), (917504,", "4095, 'Tibetan'), (4096, 4255, 'Myanmar'), (4256, 4351, 'Georgian'), (4352, 4607, 'Hang<NAME>'), (4608, 4991,", "'Deseret'), (66640, 66687, 'Shavian'), (66688, 66735, 'Osmanya'), (66736, 66815, 'Osage'), (66816, 66863, 'Elbasan'),", "'Tangut Components'), (110592, 110847, 'Kana Supplement'), (110848, 110895, 'Kana Extended-A'), (110896, 110959, 'Small", "Forms'), (65056, 65071, 'Combining Half Marks'), (65072, 65103, 'CJK Compatibility Forms'), (65104, 65135,", "879, 'Combining Diacritical Marks'), (880, 1023, 'Greek and Coptic'), (1024, 1279, 'Cyrillic'), (1280,", "(68224, 68255, 'Old North Arabian'), (68288, 68351, 'Manichaean'), (68352, 68415, 'Avestan'), (68416, 68447,", "'Nandinagari'), (72192, 72271, '<NAME>'), (72272, 72367, 'Soyombo'), (72384, 72447, '<NAME>'), (72704, 72815, 'Bhaiksuki'),", "Extended'), (7360, 7375, 'Sundanese Supplement'), (7376, 7423, 'Vedic Extensions'), (7424, 7551, 'Phonetic Extensions'),", "(67712, 67759, 'Nabataean'), (67808, 67839, 'Hatran'), (67840, 67871, 'Phoenician'), (67872, 67903, 'Lydian'), (67968,", "Canadian Aboriginal Syllabics Extended'), (6400, 6479, 'Limbu'), (6480, 6527, 'Tai Le'), (6528, 6623,", "(113664, 113823, 'Duployan'), (113824, 113839, 'Shorthand Format Controls'), (118784, 119039, 'Byzantine Musical Symbols'),", "'Box Drawing'), (9600, 9631, 'Block Elements'), (9632, 9727, 'Geometric Shapes'), (9728, 9983, 'Miscellaneous", "1423, 'Armenian'), (1424, 1535, 'Hebrew'), (1536, 1791, 'Arabic'), (1792, 1871, 'Syriac'), (1872, 1919,", "119375, 'Ancient Greek Musical 
Notation'), (119520, 119551, 'Mayan Numerals'), (119552, 119647, 'Tai Xuan", "(11744, 11775, 'Cyrillic Extended-A'), (11776, 11903, 'Supplemental Punctuation'), (11904, 12031, 'CJK Radicals Supplement'),", "'Rejang'), (43360, 43391, 'Hangul Jamo Extended-A'), (43392, 43487, 'Javanese'), (43488, 43519, 'Myanmar Extended-B'),", "Numbers'), (65856, 65935, 'Ancient Greek Numbers'), (65936, 65999, 'Ancient Symbols'), (66000, 66047, 'Phaistos", "4351, 'Georgian'), (4352, 4607, 'Hang<NAME>'), (4608, 4991, 'Ethiopic'), (4992, 5023, 'Ethiopic Supplement'), (5024,", "(2688, 2815, 'Gujarati'), (2816, 2943, 'Oriya'), (2944, 3071, 'Tamil'), (3072, 3199, 'Telugu'), (3200,", "73471, 'Makasar'), (73664, 73727, 'Tamil Supplement'), (73728, 74751, 'Cuneiform'), (74752, 74879, 'Cuneiform Numbers", "Symbols'), (120832, 121519, 'Sutton SignWriting'), (122880, 122927, 'Glagolitic Supplement'), (123136, 123215, '<NAME>'), (123584,", "Radicals Supplement'), (12032, 12255, 'Kangxi Radicals'), (12272, 12287, 'Ideographic Description Characters'), (12288, 12351,", "Drawing'), (9600, 9631, 'Block Elements'), (9632, 9727, 'Geometric Shapes'), (9728, 9983, 'Miscellaneous Symbols'),", "67871, 'Phoenician'), (67872, 67903, 'Lydian'), (67968, 67999, 'Meroitic Hieroglyphs'), (68000, 68095, 'Meroitic Cursive'),", "Kana Extension'), (110960, 111359, 'Nushu'), (113664, 113823, 'Duployan'), (113824, 113839, 'Shorthand Format Controls'),", "Syllabary'), (65664, 65791, 'Linear B Ideograms'), (65792, 65855, 'Aegean Numbers'), (65856, 65935, 'Ancient", "blocks: if start <= cp <= end: return name blocks = [(0, 127,", "'Mayan Numerals'), (119552, 119647, 'Tai Xuan Jing Symbols'), (119648, 119679, 'Counting Rod Numerals'),", "'Khojki'), (70272, 70319, 'Multani'), (70320, 70399, 'Khudawadi'), (70400, 70527, 'Grantha'), (70656, 70783, 'Newa'),", "(688, 767, 'Spacing Modifier Letters'), (768, 879, 'Combining Diacritical Marks'), (880, 1023, 'Greek", "Mathematical Operators'), (11008, 11263, 'Miscellaneous 
Symbols and Arrows'), (11264, 11359, 'Glagolitic'), (11360, 11391,", "1919, 'Arabic Supplement'), (1920, 1983, 'Thaana'), (1984, 2047, 'NKo'), (2048, 2111, 'Samaritan'), (2112,", "Alphanumeric Symbols'), (120832, 121519, 'Sutton SignWriting'), (122880, 122927, 'Glagolitic Supplement'), (123136, 123215, '<NAME>'),", "'Carian'), (66272, 66303, 'Coptic Epact Numbers'), (66304, 66351, 'Old Italic'), (66352, 66383, 'Gothic'),", "'Katakana'), (12544, 12591, 'Bopomofo'), (12592, 12687, 'Hangul Compatibility Jamo'), (12688, 12703, 'Kanbun'), (12704,", "(917760, 917999, 'Variation Selectors Supplement'), (983040, 1048575, 'Supplementary Private Use Area-A'), (1048576, 1114111,", "7551, 'Phonetic Extensions'), (7552, 7615, 'Phonetic Extensions Supplement'), (7616, 7679, 'Combining Diacritical Marks", "Marks Extended'), (6912, 7039, 'Balinese'), (7040, 7103, 'Sundanese'), (7104, 7167, 'Batak'), (7168, 7247,", "113823, 'Duployan'), (113824, 113839, 'Shorthand Format Controls'), (118784, 119039, 'Byzantine Musical Symbols'), (119040,", "(119808, 120831, 'Mathematical Alphanumeric Symbols'), (120832, 121519, 'Sutton SignWriting'), (122880, 122927, 'Glagolitic Supplement'),", "73727, 'Tamil Supplement'), (73728, 74751, 'Cuneiform'), (74752, 74879, 'Cuneiform Numbers and Punctuation'), (74880,", "11775, 'Cyrillic Extended-A'), (11776, 11903, 'Supplemental Punctuation'), (11904, 12031, 'CJK Radicals Supplement'), (12032,", "'Armenian'), (1424, 1535, 'Hebrew'), (1536, 1791, 'Arabic'), (1792, 1871, 'Syriac'), (1872, 1919, 'Arabic", "Symbols'), (9984, 10175, 'Dingbats'), (10176, 10223, 'Miscellaneous Mathematical Symbols-A'), (10224, 10239, 'Supplemental Arrows-A'),", "12351, 'CJK Symbols and Punctuation'), (12352, 12447, 'Hiragana'), (12448, 12543, 'Katakana'), (12544, 12591,", "'Hangul Jamo Extended-A'), (43392, 43487, 'Javanese'), (43488, 43519, 'Myanmar Extended-B'), (43520, 43615, 'Cham'),", "Extended-B'), (55296, 56191, 'High Surrogates'), (56192, 56319, 'High Private Use 
Surrogates'), (56320, 57343,", "66735, 'Osmanya'), (66736, 66815, 'Osage'), (66816, 66863, 'Elbasan'), (66864, 66927, 'Caucasian Albanian'), (67072,", "19903, 'CJK Unified Ideographs Extension A'), (19904, 19967, 'Yijing Hexagram Symbols'), (19968, 40959,", "(69888, 69967, 'Chakma'), (69968, 70015, 'Mahajani'), (70016, 70111, 'Sharada'), (70112, 70143, 'Sinhala Archaic", "(119520, 119551, 'Mayan Numerals'), (119552, 119647, 'Tai Xuan Jing Symbols'), (119648, 119679, 'Counting", "7359, 'Georgian Extended'), (7360, 7375, 'Sundanese Supplement'), (7376, 7423, 'Vedic Extensions'), (7424, 7551,", "(9984, 10175, 'Dingbats'), (10176, 10223, 'Miscellaneous Mathematical Symbols-A'), (10224, 10239, 'Supplemental Arrows-A'), (10240,", "70143, 'Sinhala Archaic Numbers'), (70144, 70223, 'Khojki'), (70272, 70319, 'Multani'), (70320, 70399, 'Khudawadi'),", "Parthian'), (68448, 68479, 'Inscriptional Pahlavi'), (68480, 68527, '<NAME>'), (68608, 68687, 'Old Turkic'), (68736,", "127743, 'Enclosed Ideographic Supplement'), (127744, 128511, 'Miscellaneous Symbols and Pictographs'), (128512, 128591, 'Emoticons'),", "11391, 'Latin Extended-C'), (11392, 11519, 'Coptic'), (11520, 11567, 'Georgian Supplement'), (11568, 11647, 'Tifinagh'),", "'Combining Diacritical Marks Supplement'), (7680, 7935, 'Latin Extended Additional'), (7936, 8191, 'Greek Extended'),", "Shapes'), (9728, 9983, 'Miscellaneous Symbols'), (9984, 10175, 'Dingbats'), (10176, 10223, 'Miscellaneous Mathematical Symbols-A'),", "'Syriac Supplement'), (2208, 2303, 'Arabic Extended-A'), (2304, 2431, 'Devanagari'), (2432, 2559, 'Bengali'), (2560,", "43967, 'Cherokee Supplement'), (43968, 44031, '<NAME>'), (44032, 55215, 'Hangul Syllables'), (55216, 55295, 'Hangul", "8959, 'Mathematical Operators'), (8960, 9215, 'Miscellaneous Technical'), (9216, 9279, 'Control Pictures'), (9280, 9311,", "'Phoenician'), (67872, 67903, 'Lydian'), (67968, 67999, 'Meroitic Hieroglyphs'), (68000, 68095, 'Meroitic Cursive'), (68096,", "6015, 
'Tagbanwa'), (6016, 6143, 'Khmer'), (6144, 6319, 'Mongolian'), (6320, 6399, 'Unified Canadian Aboriginal", "(7616, 7679, 'Combining Diacritical Marks Supplement'), (7680, 7935, 'Latin Extended Additional'), (7936, 8191,", "(72816, 72895, 'Marchen'), (72960, 73055, '<NAME>'), (73056, 73135, '<NAME>'), (73440, 73471, 'Makasar'), (73664,", "'Supplemental Mathematical Operators'), (11008, 11263, 'Miscellaneous Symbols and Arrows'), (11264, 11359, 'Glagolitic'), (11360,", "(70656, 70783, 'Newa'), (70784, 70879, 'Tirhuta'), (71040, 71167, 'Siddham'), (71168, 71263, 'Modi'), (71264,", "43615, 'Cham'), (43616, 43647, 'Myanmar Extended-A'), (43648, 43743, 'Tai Viet'), (43744, 43775, 'Meetei", "66463, 'Ugaritic'), (66464, 66527, 'Old Persian'), (66560, 66639, 'Deseret'), (66640, 66687, 'Shavian'), (66688,", "'Domino Tiles'), (127136, 127231, 'Playing Cards'), (127232, 127487, 'Enclosed Alphanumeric Supplement'), (127488, 127743,", "Presentation Forms-B'), (65280, 65519, 'Halfwidth and Fullwidth Forms'), (65520, 65535, 'Specials'), (65536, 65663,", "Alphanumerics'), (9472, 9599, 'Box Drawing'), (9600, 9631, 'Block Elements'), (9632, 9727, 'Geometric Shapes'),", "(11568, 11647, 'Tifinagh'), (11648, 11743, 'Ethiopic Extended'), (11744, 11775, 'Cyrillic Extended-A'), (11776, 11903,", "(71424, 71487, 'Ahom'), (71680, 71759, 'Dogra'), (71840, 71935, '<NAME>'), (72096, 72191, 'Nandinagari'), (72192,", "Extended-A'), (11776, 11903, 'Supplemental Punctuation'), (11904, 12031, 'CJK Radicals Supplement'), (12032, 12255, 'Kangxi", "Extended-A'), (2304, 2431, 'Devanagari'), (2432, 2559, 'Bengali'), (2560, 2687, 'Gurmukhi'), (2688, 2815, 'Gujarati'),", "Ideographs Extension F'), (194560, 195103, 'CJK Compatibility Ideographs Supplement'), (195104, 917503, 'No Unicode", "Aramaic'), (67680, 67711, 'Palmyrene'), (67712, 67759, 'Nabataean'), (67808, 67839, 'Hatran'), (67840, 67871, 'Phoenician'),", "blocks = [(0, 127, 'Basic Latin'), (128, 255, 'Latin-1 Supplement'), (256, 383, 'Latin", 
"'<NAME>'), (92928, 93071, '<NAME>'), (93760, 93855, 'Medefaidrin'), (93952, 94111, 'Miao'), (94176, 94207, 'Ideographic", "67711, 'Palmyrene'), (67712, 67759, 'Nabataean'), (67808, 67839, 'Hatran'), (67840, 67871, 'Phoenician'), (67872, 67903,", "'Ol Chiki'), (7296, 7311, 'Cyrillic Extended-C'), (7312, 7359, 'Georgian Extended'), (7360, 7375, 'Sundanese", "43775, 'Meetei Mayek Extensions'), (43776, 43823, 'Ethiopic Extended-A'), (43824, 43887, 'Latin Extended-E'), (43888,", "(43520, 43615, 'Cham'), (43616, 43647, 'Myanmar Extended-A'), (43648, 43743, 'Tai Viet'), (43744, 43775,", "6687, 'Buginese'), (6688, 6831, 'Tai Tham'), (6832, 6911, 'Combining Diacritical Marks Extended'), (6912,", "Cards'), (127232, 127487, 'Enclosed Alphanumeric Supplement'), (127488, 127743, 'Enclosed Ideographic Supplement'), (127744, 128511,", "'Cherokee'), (5120, 5759, 'Unified Canadian Aboriginal Syllabics'), (5760, 5791, 'Ogham'), (5792, 5887, 'Runic'),", "Numerals'), (119808, 120831, 'Mathematical Alphanumeric Symbols'), (120832, 121519, 'Sutton SignWriting'), (122880, 122927, 'Glagolitic", "(2944, 3071, 'Tamil'), (3072, 3199, 'Telugu'), (3200, 3327, 'Kannada'), (3328, 3455, 'Malayalam'), (3456,", "Ideographs Extension A'), (19904, 19967, 'Yijing Hexagram Symbols'), (19968, 40959, 'CJK Unified Ideographs'),", "(66864, 66927, 'Caucasian Albanian'), (67072, 67455, 'Linear A'), (67584, 67647, 'Cypriot Syllabary'), (67648,", "Return the Unicode block name for character, or None if character has no", "12799, 'Katakana Phonetic Extensions'), (12800, 13055, 'Enclosed CJK Letters and Months'), (13056, 13311,", "'Tags'), (917760, 917999, 'Variation Selectors Supplement'), (983040, 1048575, 'Supplementary Private Use Area-A'), (1048576,", "55215, 'Hangul Syllables'), (55216, 55295, 'Hangul Jamo Extended-B'), (55296, 56191, 'High Surrogates'), (56192,", "= [(0, 127, 'Basic Latin'), (128, 255, 'Latin-1 Supplement'), (256, 383, 'Latin Extended-A'),", "Forms'), (65104, 65135, 'Small Form 
Variants'), (65136, 65279, 'Arabic Presentation Forms-B'), (65280, 65519,", "'Egyptian Hieroglyph Format Controls'), (82944, 83583, 'Anatolian Hieroglyphs'), (92160, 92735, 'Bamum Supplement'), (92736,", "'Cham'), (43616, 43647, 'Myanmar Extended-A'), (43648, 43743, 'Tai Viet'), (43744, 43775, 'Meetei Mayek", "'Kharoshthi'), (68192, 68223, 'Old South Arabian'), (68224, 68255, 'Old North Arabian'), (68288, 68351,", "(92880, 92927, '<NAME>'), (92928, 93071, '<NAME>'), (93760, 93855, 'Medefaidrin'), (93952, 94111, 'Miao'), (94176,", "'Combining Diacritical Marks Extended'), (6912, 7039, 'Balinese'), (7040, 7103, 'Sundanese'), (7104, 7167, 'Batak'),", "Cursive'), (68096, 68191, 'Kharoshthi'), (68192, 68223, 'Old South Arabian'), (68224, 68255, 'Old North", "(126464, 126719, 'Arabic Mathematical Alphabetic Symbols'), (126976, 127023, 'Mahjong Tiles'), (127024, 127135, 'Domino", "(72704, 72815, 'Bhaiksuki'), (72816, 72895, 'Marchen'), (72960, 73055, '<NAME>'), (73056, 73135, '<NAME>'), (73440,", "Radicals'), (42192, 42239, 'Lisu'), (42240, 42559, 'Vai'), (42560, 42655, 'Cyrillic Extended-B'), (42656, 42751,", "(178208, 183983, 'CJK Unified Ideographs Extension E'), (183984, 191471, 'CJK Unified Ideographs Extension", "10751, 'Miscellaneous Mathematical Symbols-B'), (10752, 11007, 'Supplemental Mathematical Operators'), (11008, 11263, 'Miscellaneous Symbols", "(42560, 42655, 'Cyrillic Extended-B'), (42656, 42751, 'Bamum'), (42752, 42783, 'Modifier Tone Letters'), (42784,", "Forms'), (8592, 8703, 'Arrows'), (8704, 8959, 'Mathematical Operators'), (8960, 9215, 'Miscellaneous Technical'), (9216,", "Variants'), (65136, 65279, 'Arabic Presentation Forms-B'), (65280, 65519, 'Halfwidth and Fullwidth Forms'), (65520,", "Ideographs Extension B'), (173824, 177983, 'CJK Unified Ideographs Extension C'), (177984, 178207, 'CJK", "8303, 'General Punctuation'), (8304, 8351, 'Superscripts and Subscripts'), (8352, 8399, 'Currency Symbols'), (8400,", "(3712, 3839, 'Lao'), (3840, 4095, 
'Tibetan'), (4096, 4255, 'Myanmar'), (4256, 4351, 'Georgian'), (4352,", "Ideographs'), (64256, 64335, 'Alphabetic Presentation Forms'), (64336, 65023, 'Arabic Presentation Forms-A'), (65024, 65039,", "Components'), (110592, 110847, 'Kana Supplement'), (110848, 110895, 'Kana Extended-A'), (110896, 110959, 'Small Kana", "(7312, 7359, 'Georgian Extended'), (7360, 7375, 'Sundanese Supplement'), (7376, 7423, 'Vedic Extensions'), (7424,", "'Gothic'), (66384, 66431, 'Old Permic'), (66432, 66463, 'Ugaritic'), (66464, 66527, 'Old Persian'), (66560,", "11567, 'Georgian Supplement'), (11568, 11647, 'Tifinagh'), (11648, 11743, 'Ethiopic Extended'), (11744, 11775, 'Cyrillic", "2815, 'Gujarati'), (2816, 2943, 'Oriya'), (2944, 3071, 'Tamil'), (3072, 3199, 'Telugu'), (3200, 3327,", "9279, 'Control Pictures'), (9280, 9311, 'Optical Character Recognition'), (9312, 9471, 'Enclosed Alphanumerics'), (9472,", "Modifier Letters'), (768, 879, 'Combining Diacritical Marks'), (880, 1023, 'Greek and Coptic'), (1024,", "(9728, 9983, 'Miscellaneous Symbols'), (9984, 10175, 'Dingbats'), (10176, 10223, 'Miscellaneous Mathematical Symbols-A'), (10224,", "Pictures'), (9280, 9311, 'Optical Character Recognition'), (9312, 9471, 'Enclosed Alphanumerics'), (9472, 9599, 'Box", "12255, 'Kangxi Radicals'), (12272, 12287, 'Ideographic Description Characters'), (12288, 12351, 'CJK Symbols and", "'Sinhala'), (3584, 3711, 'Thai'), (3712, 3839, 'Lao'), (3840, 4095, 'Tibetan'), (4096, 4255, 'Myanmar'),", "'<NAME>'), (93760, 93855, 'Medefaidrin'), (93952, 94111, 'Miao'), (94176, 94207, 'Ideographic Symbols and Punctuation'),", "67759, 'Nabataean'), (67808, 67839, 'Hatran'), (67840, 67871, 'Phoenician'), (67872, 67903, 'Lydian'), (67968, 67999,", "71375, 'Takri'), (71424, 71487, 'Ahom'), (71680, 71759, 'Dogra'), (71840, 71935, '<NAME>'), (72096, 72191,", "(92736, 92783, 'Mro'), (92880, 92927, '<NAME>'), (92928, 93071, '<NAME>'), (93760, 93855, 'Medefaidrin'), (93952,", "Hieroglyph Format Controls'), (82944, 
83583, 'Anatolian Hieroglyphs'), (92160, 92735, 'Bamum Supplement'), (92736, 92783,", "(43136, 43231, 'Saurashtra'), (43232, 43263, 'Devanagari Extended'), (43264, 43311, '<NAME>'), (43312, 43359, 'Rejang'),", "Supplement'), (195104, 917503, 'No Unicode Block'), (917504, 917631, 'Tags'), (917760, 917999, 'Variation Selectors", "66271, 'Carian'), (66272, 66303, 'Coptic Epact Numbers'), (66304, 66351, 'Old Italic'), (66352, 66383,", "(127024, 127135, 'Domino Tiles'), (127136, 127231, 'Playing Cards'), (127232, 127487, 'Enclosed Alphanumeric Supplement'),", "6655, 'Khmer Symbols'), (6656, 6687, 'Buginese'), (6688, 6831, 'Tai Tham'), (6832, 6911, 'Combining", "'Brahmi'), (69760, 69839, 'Kaithi'), (69840, 69887, '<NAME>'), (69888, 69967, 'Chakma'), (69968, 70015, 'Mahajani'),", "'CJK Unified Ideographs Extension A'), (19904, 19967, 'Yijing Hexagram Symbols'), (19968, 40959, 'CJK", "72447, '<NAME>'), (72704, 72815, 'Bhaiksuki'), (72816, 72895, 'Marchen'), (72960, 73055, '<NAME>'), (73056, 73135,", "(128, 255, 'Latin-1 Supplement'), (256, 383, 'Latin Extended-A'), (384, 591, 'Latin Extended-B'), (592,", "8191, 'Greek Extended'), (8192, 8303, 'General Punctuation'), (8304, 8351, 'Superscripts and Subscripts'), (8352,", "65023, 'Arabic Presentation Forms-A'), (65024, 65039, 'Variation Selectors'), (65040, 65055, 'Vertical Forms'), (65056,", "9727, 'Geometric Shapes'), (9728, 9983, 'Miscellaneous Symbols'), (9984, 10175, 'Dingbats'), (10176, 10223, 'Miscellaneous", "Arrows-C'), (129280, 129535, 'Supplemental Symbols and Pictographs'), (129536, 129647, 'Chess Symbols'), (129648, 129791,", "Numbers'), (70144, 70223, 'Khojki'), (70272, 70319, 'Multani'), (70320, 70399, 'Khudawadi'), (70400, 70527, 'Grantha'),", "Ideographs'), (40960, 42127, 'Yi Syllables'), (42128, 42191, 'Yi Radicals'), (42192, 42239, 'Lisu'), (42240,", "from https://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python def block(character): \"\"\" Return the Unicode block name for 
character, or", "'Egyptian Hieroglyphs'), (78896, 78911, 'Egyptian Hieroglyph Format Controls'), (82944, 83583, 'Anatolian Hieroglyphs'), (92160,", "43519, 'Myanmar Extended-B'), (43520, 43615, 'Cham'), (43616, 43647, 'Myanmar Extended-A'), (43648, 43743, 'Tai", "(64336, 65023, 'Arabic Presentation Forms-A'), (65024, 65039, 'Variation Selectors'), (65040, 65055, 'Vertical Forms'),", "Numbers and Punctuation'), (74880, 75087, 'Early Dynastic Cuneiform'), (77824, 78895, 'Egyptian Hieroglyphs'), (78896,", "(126064, 126143, 'Indic Siyaq Numbers'), (126208, 126287, 'Ottoman Siyaq Numbers'), (126464, 126719, 'Arabic", "Jamo Extended-B'), (55296, 56191, 'High Surrogates'), (56192, 56319, 'High Private Use Surrogates'), (56320,", "'<NAME>'), (73440, 73471, 'Makasar'), (73664, 73727, 'Tamil Supplement'), (73728, 74751, 'Cuneiform'), (74752, 74879,", "(8400, 8447, 'Combining Diacritical Marks for Symbols'), (8448, 8527, 'Letterlike Symbols'), (8528, 8591,", "None if character has no block. from https://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python :param character\"\"\" assert isinstance(character, str)", "7311, 'Cyrillic Extended-C'), (7312, 7359, 'Georgian Extended'), (7360, 7375, 'Sundanese Supplement'), (7376, 7423,", "(57344, 63743, 'Private Use Area'), (63744, 64255, 'CJK Compatibility Ideographs'), (64256, 64335, 'Alphabetic", "Symbols'), (128768, 128895, 'Alchemical Symbols'), (128896, 129023, 'Geometric Shapes Extended'), (129024, 129279, 'Supplemental", "Symbols'), (126976, 127023, 'Mahjong Tiles'), (127024, 127135, 'Domino Tiles'), (127136, 127231, 'Playing Cards'),", "'Variation Selectors'), (65040, 65055, 'Vertical Forms'), (65056, 65071, 'Combining Half Marks'), (65072, 65103,", "(11392, 11519, 'Coptic'), (11520, 11567, 'Georgian Supplement'), (11568, 11647, 'Tifinagh'), (11648, 11743, 'Ethiopic", "'Batak'), (7168, 7247, 'Lepcha'), (7248, 7295, 'Ol Chiki'), (7296, 7311, 'Cyrillic Extended-C'), (7312,", "'<NAME>'), (69888, 69967, 
'Chakma'), (69968, 70015, 'Mahajani'), (70016, 70111, 'Sharada'), (70112, 70143, 'Sinhala", "'Meetei Mayek Extensions'), (43776, 43823, 'Ethiopic Extended-A'), (43824, 43887, 'Latin Extended-E'), (43888, 43967,", "'Runic'), (5888, 5919, 'Tagalog'), (5920, 5951, 'Hanunoo'), (5952, 5983, 'Buhid'), (5984, 6015, 'Tagbanwa'),", "66383, 'Gothic'), (66384, 66431, 'Old Permic'), (66432, 66463, 'Ugaritic'), (66464, 66527, 'Old Persian'),", "6623, 'New Tai Lue'), (6624, 6655, 'Khmer Symbols'), (6656, 6687, 'Buginese'), (6688, 6831,", "Numbers'), (126208, 126287, 'Ottoman Siyaq Numbers'), (126464, 126719, 'Arabic Mathematical Alphabetic Symbols'), (126976,", "'<NAME>'), (123584, 123647, 'Wancho'), (124928, 125151, '<NAME>'), (125184, 125279, 'Adlam'), (126064, 126143, 'Indic", "'Unified Canadian Aboriginal Syllabics Extended'), (6400, 6479, 'Limbu'), (6480, 6527, 'Tai Le'), (6528,", "9471, 'Enclosed Alphanumerics'), (9472, 9599, 'Box Drawing'), (9600, 9631, 'Block Elements'), (9632, 9727,", "5919, 'Tagalog'), (5920, 5951, 'Hanunoo'), (5952, 5983, 'Buhid'), (5984, 6015, 'Tagbanwa'), (6016, 6143,", "71935, '<NAME>'), (72096, 72191, 'Nandinagari'), (72192, 72271, '<NAME>'), (72272, 72367, 'Soyombo'), (72384, 72447,", "Coptic'), (1024, 1279, 'Cyrillic'), (1280, 1327, 'Cyrillic Supplement'), (1328, 1423, 'Armenian'), (1424, 1535,", "'Symbols and Pictographs Extended-A'), (131072, 173791, 'CJK Unified Ideographs Extension B'), (173824, 177983,", "94111, 'Miao'), (94176, 94207, 'Ideographic Symbols and Punctuation'), (94208, 100351, 'Tangut'), (100352, 101119,", "Extended-B'), (42656, 42751, 'Bamum'), (42752, 42783, 'Modifier Tone Letters'), (42784, 43007, 'Latin Extended-D'),", "Supplement'), (43968, 44031, '<NAME>'), (44032, 55215, 'Hangul Syllables'), (55216, 55295, 'Hangul Jamo Extended-B'),", "Notation'), (119520, 119551, 'Mayan Numerals'), (119552, 119647, 'Tai Xuan Jing Symbols'), (119648, 119679,", "(66272, 66303, 'Coptic Epact Numbers'), (66304, 66351, 'Old Italic'), 
(66352, 66383, 'Gothic'), (66384,", "Character Recognition'), (9312, 9471, 'Enclosed Alphanumerics'), (9472, 9599, 'Box Drawing'), (9600, 9631, 'Block", "'Ugaritic'), (66464, 66527, 'Old Persian'), (66560, 66639, 'Deseret'), (66640, 66687, 'Shavian'), (66688, 66735,", "'Enclosed Ideographic Supplement'), (127744, 128511, 'Miscellaneous Symbols and Pictographs'), (128512, 128591, 'Emoticons'), (128592,", "'Linear B Ideograms'), (65792, 65855, 'Aegean Numbers'), (65856, 65935, 'Ancient Greek Numbers'), (65936,", "'Tibetan'), (4096, 4255, 'Myanmar'), (4256, 4351, 'Georgian'), (4352, 4607, 'Hang<NAME>'), (4608, 4991, 'Ethiopic'),", "(72960, 73055, '<NAME>'), (73056, 73135, '<NAME>'), (73440, 73471, 'Makasar'), (73664, 73727, 'Tamil Supplement'),", "'Linear B Syllabary'), (65664, 65791, 'Linear B Ideograms'), (65792, 65855, 'Aegean Numbers'), (65856,", "A'), (67584, 67647, 'Cypriot Syllabary'), (67648, 67679, 'Imperial Aramaic'), (67680, 67711, 'Palmyrene'), (67712,", "len(character) == 1, repr(character) cp = ord(character) for start, end, name in blocks:", "43231, 'Saurashtra'), (43232, 43263, 'Devanagari Extended'), (43264, 43311, '<NAME>'), (43312, 43359, 'Rejang'), (43360,", "(4352, 4607, 'Hang<NAME>'), (4608, 4991, 'Ethiopic'), (4992, 5023, 'Ethiopic Supplement'), (5024, 5119, 'Cherokee'),", "Extended Additional'), (7936, 8191, 'Greek Extended'), (8192, 8303, 'General Punctuation'), (8304, 8351, 'Superscripts", "Ideograms'), (65792, 65855, 'Aegean Numbers'), (65856, 65935, 'Ancient Greek Numbers'), (65936, 65999, 'Ancient", "(65792, 65855, 'Aegean Numbers'), (65856, 65935, 'Ancient Greek Numbers'), (65936, 65999, 'Ancient Symbols'),", "'Caucasian Albanian'), (67072, 67455, 'Linear A'), (67584, 67647, 'Cypriot Syllabary'), (67648, 67679, 'Imperial", "Numbers'), (66304, 66351, 'Old Italic'), (66352, 66383, 'Gothic'), (66384, 66431, 'Old Permic'), (66432,", "64255, 'CJK Compatibility Ideographs'), (64256, 64335, 'Alphabetic Presentation Forms'), (64336, 65023, 
'Arabic Presentation", "'Early Dynastic Cuneiform'), (77824, 78895, 'Egyptian Hieroglyphs'), (78896, 78911, 'Egyptian Hieroglyph Format Controls'),", "'Currency Symbols'), (8400, 8447, 'Combining Diacritical Marks for Symbols'), (8448, 8527, 'Letterlike Symbols'),", "'Elbasan'), (66864, 66927, 'Caucasian Albanian'), (67072, 67455, 'Linear A'), (67584, 67647, 'Cypriot Syllabary'),", "Marks'), (880, 1023, 'Greek and Coptic'), (1024, 1279, 'Cyrillic'), (1280, 1327, 'Cyrillic Supplement'),", "72191, 'Nandinagari'), (72192, 72271, '<NAME>'), (72272, 72367, 'Soyombo'), (72384, 72447, '<NAME>'), (72704, 72815,", "(119040, 119295, 'Musical Symbols'), (119296, 119375, 'Ancient Greek Musical Notation'), (119520, 119551, 'Mayan", "and Pictographs Extended-A'), (131072, 173791, 'CJK Unified Ideographs Extension B'), (173824, 177983, 'CJK", "'Chess Symbols'), (129648, 129791, 'Symbols and Pictographs Extended-A'), (131072, 173791, 'CJK Unified Ideographs", "'Coptic'), (11520, 11567, 'Georgian Supplement'), (11568, 11647, 'Tifinagh'), (11648, 11743, 'Ethiopic Extended'), (11744,", "(1984, 2047, 'NKo'), (2048, 2111, 'Samaritan'), (2112, 2143, 'Mandaic'), (2144, 2159, 'Syriac Supplement'),", "7615, 'Phonetic Extensions Supplement'), (7616, 7679, 'Combining Diacritical Marks Supplement'), (7680, 7935, 'Latin", "66927, 'Caucasian Albanian'), (67072, 67455, 'Linear A'), (67584, 67647, 'Cypriot Syllabary'), (67648, 67679,", "1023, 'Greek and Coptic'), (1024, 1279, 'Cyrillic'), (1280, 1327, 'Cyrillic Supplement'), (1328, 1423,", "and Subscripts'), (8352, 8399, 'Currency Symbols'), (8400, 8447, 'Combining Diacritical Marks for Symbols'),", "70399, 'Khudawadi'), (70400, 70527, 'Grantha'), (70656, 70783, 'Newa'), (70784, 70879, 'Tirhuta'), (71040, 71167,", "'Yi Syllables'), (42128, 42191, 'Yi Radicals'), (42192, 42239, 'Lisu'), (42240, 42559, 'Vai'), (42560,", "'Shorthand Format Controls'), (118784, 119039, 'Byzantine Musical Symbols'), (119040, 119295, 'Musical Symbols'), (119296,", 
"129535, 'Supplemental Symbols and Pictographs'), (129536, 129647, 'Chess Symbols'), (129648, 129791, 'Symbols and", "69759, 'Brahmi'), (69760, 69839, 'Kaithi'), (69840, 69887, '<NAME>'), (69888, 69967, 'Chakma'), (69968, 70015,", "1871, 'Syriac'), (1872, 1919, 'Arabic Supplement'), (1920, 1983, 'Thaana'), (1984, 2047, 'NKo'), (2048,", "(10224, 10239, 'Supplemental Arrows-A'), (10240, 10495, 'Braille Patterns'), (10496, 10623, 'Supplemental Arrows-B'), (10624,", "65535, 'Specials'), (65536, 65663, 'Linear B Syllabary'), (65664, 65791, 'Linear B Ideograms'), (65792,", "191471, 'CJK Unified Ideographs Extension F'), (194560, 195103, 'CJK Compatibility Ideographs Supplement'), (195104,", "Unicode Block'), (917504, 917631, 'Tags'), (917760, 917999, 'Variation Selectors Supplement'), (983040, 1048575, 'Supplementary", "Siyaq Numbers'), (126208, 126287, 'Ottoman Siyaq Numbers'), (126464, 126719, 'Arabic Mathematical Alphabetic Symbols'),", "7935, 'Latin Extended Additional'), (7936, 8191, 'Greek Extended'), (8192, 8303, 'General Punctuation'), (8304,", "for character, or None if character has no block. from https://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python :param character\"\"\"", "70527, 'Grantha'), (70656, 70783, 'Newa'), (70784, 70879, 'Tirhuta'), (71040, 71167, 'Siddham'), (71168, 71263,", "Musical Notation'), (119520, 119551, 'Mayan Numerals'), (119552, 119647, 'Tai Xuan Jing Symbols'), (119648,", "7375, 'Sundanese Supplement'), (7376, 7423, 'Vedic Extensions'), (7424, 7551, 'Phonetic Extensions'), (7552, 7615,", "has no block. 
from https://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python :param character\"\"\" assert isinstance(character, str) and len(character) ==", "(67072, 67455, 'Linear A'), (67584, 67647, 'Cypriot Syllabary'), (67648, 67679, 'Imperial Aramaic'), (67680,", "'Hatran'), (67840, 67871, 'Phoenician'), (67872, 67903, 'Lydian'), (67968, 67999, 'Meroitic Hieroglyphs'), (68000, 68095,", "57343, 'Low Surrogates'), (57344, 63743, 'Private Use Area'), (63744, 64255, 'CJK Compatibility Ideographs'),", "'Multani'), (70320, 70399, 'Khudawadi'), (70400, 70527, 'Grantha'), (70656, 70783, 'Newa'), (70784, 70879, 'Tirhuta'),", "Albanian'), (67072, 67455, 'Linear A'), (67584, 67647, 'Cypriot Syllabary'), (67648, 67679, 'Imperial Aramaic'),", "Extended-B'), (43520, 43615, 'Cham'), (43616, 43647, 'Myanmar Extended-A'), (43648, 43743, 'Tai Viet'), (43744,", "'Vertical Forms'), (65056, 65071, 'Combining Half Marks'), (65072, 65103, 'CJK Compatibility Forms'), (65104,", "12703, 'Kanbun'), (12704, 12735, 'Bopomofo Extended'), (12736, 12783, 'CJK Strokes'), (12784, 12799, 'Katakana", "'Emoticons'), (128592, 128639, 'Ornamental Dingbats'), (128640, 128767, 'Transport and Map Symbols'), (128768, 128895,", "Arrows-A'), (10240, 10495, 'Braille Patterns'), (10496, 10623, 'Supplemental Arrows-B'), (10624, 10751, 'Miscellaneous Mathematical", "'Sharada'), (70112, 70143, 'Sinhala Archaic Numbers'), (70144, 70223, 'Khojki'), (70272, 70319, 'Multani'), (70320,", "13311, 'CJK Compatibility'), (13312, 19903, 'CJK Unified Ideographs Extension A'), (19904, 19967, 'Yijing", "92783, 'Mro'), (92880, 92927, '<NAME>'), (92928, 93071, '<NAME>'), (93760, 93855, 'Medefaidrin'), (93952, 94111,", "'Limbu'), (6480, 6527, 'Tai Le'), (6528, 6623, 'New Tai Lue'), (6624, 6655, 'Khmer", "Dynastic Cuneiform'), (77824, 78895, 'Egyptian Hieroglyphs'), (78896, 78911, 'Egyptian Hieroglyph Format Controls'), (82944,", "Rod Numerals'), (119808, 120831, 'Mathematical Alphanumeric Symbols'), (120832, 
121519, 'Sutton SignWriting'), (122880, 122927,", "2303, 'Arabic Extended-A'), (2304, 2431, 'Devanagari'), (2432, 2559, 'Bengali'), (2560, 2687, 'Gurmukhi'), (2688,", "(7680, 7935, 'Latin Extended Additional'), (7936, 8191, 'Greek Extended'), (8192, 8303, 'General Punctuation'),", "Marks'), (65072, 65103, 'CJK Compatibility Forms'), (65104, 65135, 'Small Form Variants'), (65136, 65279,", "75087, 'Early Dynastic Cuneiform'), (77824, 78895, 'Egyptian Hieroglyphs'), (78896, 78911, 'Egyptian Hieroglyph Format", "66863, 'Elbasan'), (66864, 66927, 'Caucasian Albanian'), (67072, 67455, 'Linear A'), (67584, 67647, 'Cypriot", "block. from https://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python :param character\"\"\" assert isinstance(character, str) and len(character) == 1, repr(character)", "Numerals'), (119552, 119647, 'Tai Xuan Jing Symbols'), (119648, 119679, 'Counting Rod Numerals'), (119808,", "(12448, 12543, 'Katakana'), (12544, 12591, 'Bopomofo'), (12592, 12687, 'Hangul Compatibility Jamo'), (12688, 12703,", "(7360, 7375, 'Sundanese Supplement'), (7376, 7423, 'Vedic Extensions'), (7424, 7551, 'Phonetic Extensions'), (7552,", "'Latin Extended Additional'), (7936, 8191, 'Greek Extended'), (8192, 8303, 'General Punctuation'), (8304, 8351,", "'Sogdian'), (69600, 69631, 'Elymaic'), (69632, 69759, 'Brahmi'), (69760, 69839, 'Kaithi'), (69840, 69887, '<NAME>'),", "character\"\"\" assert isinstance(character, str) and len(character) == 1, repr(character) cp = ord(character) for", "start <= cp <= end: return name blocks = [(0, 127, 'Basic Latin'),", "126143, 'Indic Siyaq Numbers'), (126208, 126287, 'Ottoman Siyaq Numbers'), (126464, 126719, 'Arabic Mathematical", "66639, 'Deseret'), (66640, 66687, 'Shavian'), (66688, 66735, 'Osmanya'), (66736, 66815, 'Osage'), (66816, 66863,", "Greek Musical Notation'), (119520, 119551, 'Mayan Numerals'), (119552, 119647, 'Tai Xuan Jing Symbols'),", "767, 'Spacing Modifier Letters'), (768, 879, 'Combining Diacritical 
Marks'), (880, 1023, 'Greek and", "(8192, 8303, 'General Punctuation'), (8304, 8351, 'Superscripts and Subscripts'), (8352, 8399, 'Currency Symbols'),", "(5888, 5919, 'Tagalog'), (5920, 5951, 'Hanunoo'), (5952, 5983, 'Buhid'), (5984, 6015, 'Tagbanwa'), (6016,", "(72096, 72191, 'Nandinagari'), (72192, 72271, '<NAME>'), (72272, 72367, 'Soyombo'), (72384, 72447, '<NAME>'), (72704,", "'Inscriptional Parthian'), (68448, 68479, 'Inscriptional Pahlavi'), (68480, 68527, '<NAME>'), (68608, 68687, 'Old Turkic'),", "'Hanunoo'), (5952, 5983, 'Buhid'), (5984, 6015, 'Tagbanwa'), (6016, 6143, 'Khmer'), (6144, 6319, 'Mongolian'),", "Symbols'), (119040, 119295, 'Musical Symbols'), (119296, 119375, 'Ancient Greek Musical Notation'), (119520, 119551,", "Extended'), (6400, 6479, 'Limbu'), (6480, 6527, 'Tai Le'), (6528, 6623, 'New Tai Lue'),", "(7248, 7295, 'Ol Chiki'), (7296, 7311, 'Cyrillic Extended-C'), (7312, 7359, 'Georgian Extended'), (7360,", "(11008, 11263, 'Miscellaneous Symbols and Arrows'), (11264, 11359, 'Glagolitic'), (11360, 11391, 'Latin Extended-C'),", "'Arrows'), (8704, 8959, 'Mathematical Operators'), (8960, 9215, 'Miscellaneous Technical'), (9216, 9279, 'Control Pictures'),", "113839, 'Shorthand Format Controls'), (118784, 119039, 'Byzantine Musical Symbols'), (119040, 119295, 'Musical Symbols'),", "Presentation Forms-A'), (65024, 65039, 'Variation Selectors'), (65040, 65055, 'Vertical Forms'), (65056, 65071, 'Combining", "Supplement'), (256, 383, 'Latin Extended-A'), (384, 591, 'Latin Extended-B'), (592, 687, 'IPA Extensions'),", "'Geometric Shapes'), (9728, 9983, 'Miscellaneous Symbols'), (9984, 10175, 'Dingbats'), (10176, 10223, 'Miscellaneous Mathematical", "= ord(character) for start, end, name in blocks: if start <= cp <=", "Symbols and Punctuation'), (12352, 12447, 'Hiragana'), (12448, 12543, 'Katakana'), (12544, 12591, 'Bopomofo'), (12592,", "(70784, 70879, 'Tirhuta'), (71040, 71167, 'Siddham'), (71168, 71263, 'Modi'), (71264, 71295, 'Mongolian 
Supplement'),", "'Vai'), (42560, 42655, 'Cyrillic Extended-B'), (42656, 42751, 'Bamum'), (42752, 42783, 'Modifier Tone Letters'),", "(56320, 57343, 'Low Surrogates'), (57344, 63743, 'Private Use Area'), (63744, 64255, 'CJK Compatibility", "42751, 'Bamum'), (42752, 42783, 'Modifier Tone Letters'), (42784, 43007, 'Latin Extended-D'), (43008, 43055,", "68351, 'Manichaean'), (68352, 68415, 'Avestan'), (68416, 68447, 'Inscriptional Parthian'), (68448, 68479, 'Inscriptional Pahlavi'),", "and Punctuation'), (12352, 12447, 'Hiragana'), (12448, 12543, 'Katakana'), (12544, 12591, 'Bopomofo'), (12592, 12687,", "Xuan Jing Symbols'), (119648, 119679, 'Counting Rod Numerals'), (119808, 120831, 'Mathematical Alphanumeric Symbols'),", "'Katakana Phonetic Extensions'), (12800, 13055, 'Enclosed CJK Letters and Months'), (13056, 13311, 'CJK", "'NKo'), (2048, 2111, 'Samaritan'), (2112, 2143, 'Mandaic'), (2144, 2159, 'Syriac Supplement'), (2208, 2303,", "(64256, 64335, 'Alphabetic Presentation Forms'), (64336, 65023, 'Arabic Presentation Forms-A'), (65024, 65039, 'Variation", "(69376, 69423, 'Old Sogdian'), (69424, 69487, 'Sogdian'), (69600, 69631, 'Elymaic'), (69632, 69759, 'Brahmi'),", "(100352, 101119, 'Tangut Components'), (110592, 110847, 'Kana Supplement'), (110848, 110895, 'Kana Extended-A'), (110896,", "Extension'), (110960, 111359, 'Nushu'), (113664, 113823, 'Duployan'), (113824, 113839, 'Shorthand Format Controls'), (118784,", "and Map Symbols'), (128768, 128895, 'Alchemical Symbols'), (128896, 129023, 'Geometric Shapes Extended'), (129024,", "43135, 'Phags-pa'), (43136, 43231, 'Saurashtra'), (43232, 43263, 'Devanagari Extended'), (43264, 43311, '<NAME>'), (43312,", "9631, 'Block Elements'), (9632, 9727, 'Geometric Shapes'), (9728, 9983, 'Miscellaneous Symbols'), (9984, 10175,", "'Phonetic Extensions'), (7552, 7615, 'Phonetic Extensions Supplement'), (7616, 7679, 'Combining Diacritical Marks Supplement'),", "73135, '<NAME>'), (73440, 73471, 'Makasar'), (73664, 73727, 'Tamil 
Supplement'), (73728, 74751, 'Cuneiform'), (74752,", "South Arabian'), (68224, 68255, 'Old North Arabian'), (68288, 68351, 'Manichaean'), (68352, 68415, 'Avestan'),", "Tone Letters'), (42784, 43007, 'Latin Extended-D'), (43008, 43055, 'Syloti Nagri'), (43056, 43071, 'Common", "'Modifier Tone Letters'), (42784, 43007, 'Latin Extended-D'), (43008, 43055, 'Syloti Nagri'), (43056, 43071,", "8447, 'Combining Diacritical Marks for Symbols'), (8448, 8527, 'Letterlike Symbols'), (8528, 8591, 'Number", "43311, '<NAME>'), (43312, 43359, 'Rejang'), (43360, 43391, 'Hangul Jamo Extended-A'), (43392, 43487, 'Javanese'),", "'Miscellaneous Mathematical Symbols-B'), (10752, 11007, 'Supplemental Mathematical Operators'), (11008, 11263, 'Miscellaneous Symbols and", "Diacritical Marks'), (880, 1023, 'Greek and Coptic'), (1024, 1279, 'Cyrillic'), (1280, 1327, 'Cyrillic", "1983, 'Thaana'), (1984, 2047, 'NKo'), (2048, 2111, 'Samaritan'), (2112, 2143, 'Mandaic'), (2144, 2159,", "(11904, 12031, 'CJK Radicals Supplement'), (12032, 12255, 'Kangxi Radicals'), (12272, 12287, 'Ideographic Description", "Controls'), (118784, 119039, 'Byzantine Musical Symbols'), (119040, 119295, 'Musical Symbols'), (119296, 119375, 'Ancient", "11359, 'Glagolitic'), (11360, 11391, 'Latin Extended-C'), (11392, 11519, 'Coptic'), (11520, 11567, 'Georgian Supplement'),", "'Hangul Compatibility Jamo'), (12688, 12703, 'Kanbun'), (12704, 12735, 'Bopomofo Extended'), (12736, 12783, 'CJK", "(73056, 73135, '<NAME>'), (73440, 73471, 'Makasar'), (73664, 73727, 'Tamil Supplement'), (73728, 74751, 'Cuneiform'),", "(12032, 12255, 'Kangxi Radicals'), (12272, 12287, 'Ideographic Description Characters'), (12288, 12351, 'CJK Symbols", "5887, 'Runic'), (5888, 5919, 'Tagalog'), (5920, 5951, 'Hanunoo'), (5952, 5983, 'Buhid'), (5984, 6015,", "and len(character) == 1, repr(character) cp = ord(character) for start, end, name in", "68095, 'Meroitic Cursive'), (68096, 68191, 'Kharoshthi'), (68192, 68223, 'Old South Arabian'), (68224, 
68255,", "Supplement'), (110848, 110895, 'Kana Extended-A'), (110896, 110959, 'Small Kana Extension'), (110960, 111359, 'Nushu'),", "(10496, 10623, 'Supplemental Arrows-B'), (10624, 10751, 'Miscellaneous Mathematical Symbols-B'), (10752, 11007, 'Supplemental Mathematical", "110959, 'Small Kana Extension'), (110960, 111359, 'Nushu'), (113664, 113823, 'Duployan'), (113824, 113839, 'Shorthand", "'Sundanese Supplement'), (7376, 7423, 'Vedic Extensions'), (7424, 7551, 'Phonetic Extensions'), (7552, 7615, 'Phonetic", "'IPA Extensions'), (688, 767, 'Spacing Modifier Letters'), (768, 879, 'Combining Diacritical Marks'), (880,", "'General Punctuation'), (8304, 8351, 'Superscripts and Subscripts'), (8352, 8399, 'Currency Symbols'), (8400, 8447,", "or None if character has no block. from https://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python :param character\"\"\" assert isinstance(character,", "'Lisu'), (42240, 42559, 'Vai'), (42560, 42655, 'Cyrillic Extended-B'), (42656, 42751, 'Bamum'), (42752, 42783,", "Use Area'), (63744, 64255, 'CJK Compatibility Ideographs'), (64256, 64335, 'Alphabetic Presentation Forms'), (64336,", "(43360, 43391, 'Hangul Jamo Extended-A'), (43392, 43487, 'Javanese'), (43488, 43519, 'Myanmar Extended-B'), (43520,", "2431, 'Devanagari'), (2432, 2559, 'Bengali'), (2560, 2687, 'Gurmukhi'), (2688, 2815, 'Gujarati'), (2816, 2943,", "(123136, 123215, '<NAME>'), (123584, 123647, 'Wancho'), (124928, 125151, '<NAME>'), (125184, 125279, 'Adlam'), (126064,", "(2304, 2431, 'Devanagari'), (2432, 2559, 'Bengali'), (2560, 2687, 'Gurmukhi'), (2688, 2815, 'Gujarati'), (2816,", "Extended-C'), (7312, 7359, 'Georgian Extended'), (7360, 7375, 'Sundanese Supplement'), (7376, 7423, 'Vedic Extensions'),", "Ideographs Extension C'), (177984, 178207, 'CJK Unified Ideographs Extension D'), (178208, 183983, 'CJK", "(65664, 65791, 'Linear B Ideograms'), (65792, 65855, 'Aegean Numbers'), (65856, 65935, 'Ancient Greek", "(65280, 65519, 'Halfwidth and 
Fullwidth Forms'), (65520, 65535, 'Specials'), (65536, 65663, 'Linear B", "'Old Italic'), (66352, 66383, 'Gothic'), (66384, 66431, 'Old Permic'), (66432, 66463, 'Ugaritic'), (66464,", "Extension B'), (173824, 177983, 'CJK Unified Ideographs Extension C'), (177984, 178207, 'CJK Unified", "'Saurashtra'), (43232, 43263, 'Devanagari Extended'), (43264, 43311, '<NAME>'), (43312, 43359, 'Rejang'), (43360, 43391,", "(65136, 65279, 'Arabic Presentation Forms-B'), (65280, 65519, 'Halfwidth and Fullwidth Forms'), (65520, 65535,", "Syllabics'), (5760, 5791, 'Ogham'), (5792, 5887, 'Runic'), (5888, 5919, 'Tagalog'), (5920, 5951, 'Hanunoo'),", "and Pictographs'), (129536, 129647, 'Chess Symbols'), (129648, 129791, 'Symbols and Pictographs Extended-A'), (131072,", "(69968, 70015, 'Mahajani'), (70016, 70111, 'Sharada'), (70112, 70143, 'Sinhala Archaic Numbers'), (70144, 70223,", "(3584, 3711, 'Thai'), (3712, 3839, 'Lao'), (3840, 4095, 'Tibetan'), (4096, 4255, 'Myanmar'), (4256,", "'Latin Extended-A'), (384, 591, 'Latin Extended-B'), (592, 687, 'IPA Extensions'), (688, 767, 'Spacing", "'Alphabetic Presentation Forms'), (64336, 65023, 'Arabic Presentation Forms-A'), (65024, 65039, 'Variation Selectors'), (65040,", "(131072, 173791, 'CJK Unified Ideographs Extension B'), (173824, 177983, 'CJK Unified Ideographs Extension", "Italic'), (66352, 66383, 'Gothic'), (66384, 66431, 'Old Permic'), (66432, 66463, 'Ugaritic'), (66464, 66527,", "'Medefaidrin'), (93952, 94111, 'Miao'), (94176, 94207, 'Ideographic Symbols and Punctuation'), (94208, 100351, 'Tangut'),", "'Syloti Nagri'), (43056, 43071, 'Common Indic Number Forms'), (43072, 43135, 'Phags-pa'), (43136, 43231,", "(12784, 12799, 'Katakana Phonetic Extensions'), (12800, 13055, 'Enclosed CJK Letters and Months'), (13056,", "'Combining Diacritical Marks'), (880, 1023, 'Greek and Coptic'), (1024, 1279, 'Cyrillic'), (1280, 1327,", "'Miao'), (94176, 94207, 'Ideographic Symbols and Punctuation'), (94208, 100351, 'Tangut'), (100352, 101119, 
'Tangut", "(127232, 127487, 'Enclosed Alphanumeric Supplement'), (127488, 127743, 'Enclosed Ideographic Supplement'), (127744, 128511, 'Miscellaneous", "(6400, 6479, 'Limbu'), (6480, 6527, 'Tai Le'), (6528, 6623, 'New Tai Lue'), (6624,", "70319, 'Multani'), (70320, 70399, 'Khudawadi'), (70400, 70527, 'Grantha'), (70656, 70783, 'Newa'), (70784, 70879,", "(880, 1023, 'Greek and Coptic'), (1024, 1279, 'Cyrillic'), (1280, 1327, 'Cyrillic Supplement'), (1328,", "'Tirhuta'), (71040, 71167, 'Siddham'), (71168, 71263, 'Modi'), (71264, 71295, 'Mongolian Supplement'), (71296, 71375,", "for start, end, name in blocks: if start <= cp <= end: return", "Arrows'), (11264, 11359, 'Glagolitic'), (11360, 11391, 'Latin Extended-C'), (11392, 11519, 'Coptic'), (11520, 11567,", "Extended-B'), (592, 687, 'IPA Extensions'), (688, 767, 'Spacing Modifier Letters'), (768, 879, 'Combining", "end, name in blocks: if start <= cp <= end: return name blocks", "43887, 'Latin Extended-E'), (43888, 43967, 'Cherokee Supplement'), (43968, 44031, '<NAME>'), (44032, 55215, 'Hangul", "'Buginese'), (6688, 6831, 'Tai Tham'), (6832, 6911, 'Combining Diacritical Marks Extended'), (6912, 7039,", "1791, 'Arabic'), (1792, 1871, 'Syriac'), (1872, 1919, 'Arabic Supplement'), (1920, 1983, 'Thaana'), (1984,", "'Khmer'), (6144, 6319, 'Mongolian'), (6320, 6399, 'Unified Canadian Aboriginal Syllabics Extended'), (6400, 6479,", "'Bengali'), (2560, 2687, 'Gurmukhi'), (2688, 2815, 'Gujarati'), (2816, 2943, 'Oriya'), (2944, 3071, 'Tamil'),", "(73728, 74751, 'Cuneiform'), (74752, 74879, 'Cuneiform Numbers and Punctuation'), (74880, 75087, 'Early Dynastic", "'Control Pictures'), (9280, 9311, 'Optical Character Recognition'), (9312, 9471, 'Enclosed Alphanumerics'), (9472, 9599,", "(70272, 70319, 'Multani'), (70320, 70399, 'Khudawadi'), (70400, 70527, 'Grantha'), (70656, 70783, 'Newa'), (70784,", "(8304, 8351, 'Superscripts and Subscripts'), (8352, 8399, 'Currency Symbols'), (8400, 8447, 'Combining Diacritical", "123215, 
'<NAME>'), (123584, 123647, 'Wancho'), (124928, 125151, '<NAME>'), (125184, 125279, 'Adlam'), (126064, 126143,", "Compatibility'), (13312, 19903, 'CJK Unified Ideographs Extension A'), (19904, 19967, 'Yijing Hexagram Symbols'),", "(66464, 66527, 'Old Persian'), (66560, 66639, 'Deseret'), (66640, 66687, 'Shavian'), (66688, 66735, 'Osmanya'),", "44031, '<NAME>'), (44032, 55215, 'Hangul Syllables'), (55216, 55295, 'Hangul Jamo Extended-B'), (55296, 56191,", "Numbers'), (126464, 126719, 'Arabic Mathematical Alphabetic Symbols'), (126976, 127023, 'Mahjong Tiles'), (127024, 127135,", "'Coptic Epact Numbers'), (66304, 66351, 'Old Italic'), (66352, 66383, 'Gothic'), (66384, 66431, 'Old", "(2432, 2559, 'Bengali'), (2560, 2687, 'Gurmukhi'), (2688, 2815, 'Gujarati'), (2816, 2943, 'Oriya'), (2944,", "70879, 'Tirhuta'), (71040, 71167, 'Siddham'), (71168, 71263, 'Modi'), (71264, 71295, 'Mongolian Supplement'), (71296,", "(43744, 43775, 'Meetei Mayek Extensions'), (43776, 43823, 'Ethiopic Extended-A'), (43824, 43887, 'Latin Extended-E'),", "(43648, 43743, 'Tai Viet'), (43744, 43775, 'Meetei Mayek Extensions'), (43776, 43823, 'Ethiopic Extended-A'),", "(9312, 9471, 'Enclosed Alphanumerics'), (9472, 9599, 'Box Drawing'), (9600, 9631, 'Block Elements'), (9632,", "(12800, 13055, 'Enclosed CJK Letters and Months'), (13056, 13311, 'CJK Compatibility'), (13312, 19903,", "'Variation Selectors Supplement'), (983040, 1048575, 'Supplementary Private Use Area-A'), (1048576, 1114111, 'Supplementary Private", "71487, 'Ahom'), (71680, 71759, 'Dogra'), (71840, 71935, '<NAME>'), (72096, 72191, 'Nandinagari'), (72192, 72271,", "(6912, 7039, 'Balinese'), (7040, 7103, 'Sundanese'), (7104, 7167, 'Batak'), (7168, 7247, 'Lepcha'), (7248,", "9599, 'Box Drawing'), (9600, 9631, 'Block Elements'), (9632, 9727, 'Geometric Shapes'), (9728, 9983,", "(94208, 100351, 'Tangut'), (100352, 101119, 'Tangut Components'), (110592, 110847, 'Kana Supplement'), (110848, 110895,", "Extended-A'), (384, 591, 'Latin 
Extended-B'), (592, 687, 'IPA Extensions'), (688, 767, 'Spacing Modifier", "Forms-B'), (65280, 65519, 'Halfwidth and Fullwidth Forms'), (65520, 65535, 'Specials'), (65536, 65663, 'Linear", "5951, 'Hanunoo'), (5952, 5983, 'Buhid'), (5984, 6015, 'Tagbanwa'), (6016, 6143, 'Khmer'), (6144, 6319,", "'Supplemental Symbols and Pictographs'), (129536, 129647, 'Chess Symbols'), (129648, 129791, 'Symbols and Pictographs", "Hieroglyphs'), (92160, 92735, 'Bamum Supplement'), (92736, 92783, 'Mro'), (92880, 92927, '<NAME>'), (92928, 93071,", "Symbols'), (8400, 8447, 'Combining Diacritical Marks for Symbols'), (8448, 8527, 'Letterlike Symbols'), (8528,", "Extended-A'), (43392, 43487, 'Javanese'), (43488, 43519, 'Myanmar Extended-B'), (43520, 43615, 'Cham'), (43616, 43647,", "(71040, 71167, 'Siddham'), (71168, 71263, 'Modi'), (71264, 71295, 'Mongolian Supplement'), (71296, 71375, 'Takri'),", "Extensions'), (688, 767, 'Spacing Modifier Letters'), (768, 879, 'Combining Diacritical Marks'), (880, 1023,", "'Georgian'), (4352, 4607, 'Hang<NAME>'), (4608, 4991, 'Ethiopic'), (4992, 5023, 'Ethiopic Supplement'), (5024, 5119,", "the Unicode block name for character, or None if character has no block.", "'Sutton SignWriting'), (122880, 122927, 'Glagolitic Supplement'), (123136, 123215, '<NAME>'), (123584, 123647, 'Wancho'), (124928,", "end: return name blocks = [(0, 127, 'Basic Latin'), (128, 255, 'Latin-1 Supplement'),", "(8448, 8527, 'Letterlike Symbols'), (8528, 8591, 'Number Forms'), (8592, 8703, 'Arrows'), (8704, 8959,", "11519, 'Coptic'), (11520, 11567, 'Georgian Supplement'), (11568, 11647, 'Tifinagh'), (11648, 11743, 'Ethiopic Extended'),", "129791, 'Symbols and Pictographs Extended-A'), (131072, 173791, 'CJK Unified Ideographs Extension B'), (173824,", "42127, 'Yi Syllables'), (42128, 42191, 'Yi Radicals'), (42192, 42239, 'Lisu'), (42240, 42559, 'Vai'),", "Symbols'), (128896, 129023, 'Geometric Shapes Extended'), (129024, 129279, 'Supplemental Arrows-C'), (129280, 129535, 
'Supplemental", "'Cyrillic Extended-C'), (7312, 7359, 'Georgian Extended'), (7360, 7375, 'Sundanese Supplement'), (7376, 7423, 'Vedic", "Supplement'), (11568, 11647, 'Tifinagh'), (11648, 11743, 'Ethiopic Extended'), (11744, 11775, 'Cyrillic Extended-A'), (11776,", "Mayek Extensions'), (43776, 43823, 'Ethiopic Extended-A'), (43824, 43887, 'Latin Extended-E'), (43888, 43967, 'Cherokee", "'Ogham'), (5792, 5887, 'Runic'), (5888, 5919, 'Tagalog'), (5920, 5951, 'Hanunoo'), (5952, 5983, 'Buhid'),", "119679, 'Counting Rod Numerals'), (119808, 120831, 'Mathematical Alphanumeric Symbols'), (120832, 121519, 'Sutton SignWriting'),", "'<NAME>'), (125184, 125279, 'Adlam'), (126064, 126143, 'Indic Siyaq Numbers'), (126208, 126287, 'Ottoman Siyaq", "(65024, 65039, 'Variation Selectors'), (65040, 65055, 'Vertical Forms'), (65056, 65071, 'Combining Half Marks'),", "126287, 'Ottoman Siyaq Numbers'), (126464, 126719, 'Arabic Mathematical Alphabetic Symbols'), (126976, 127023, 'Mahjong", "Siyaq Numbers'), (126464, 126719, 'Arabic Mathematical Alphabetic Symbols'), (126976, 127023, 'Mahjong Tiles'), (127024,", "'Indic Siyaq Numbers'), (126208, 126287, 'Ottoman Siyaq Numbers'), (126464, 126719, 'Arabic Mathematical Alphabetic", "(11264, 11359, 'Glagolitic'), (11360, 11391, 'Latin Extended-C'), (11392, 11519, 'Coptic'), (11520, 11567, 'Georgian", "'<NAME>'), (68608, 68687, 'Old Turkic'), (68736, 68863, 'Old Hungarian'), (68864, 68927, '<NAME>'), (69216,", "'Siddham'), (71168, 71263, 'Modi'), (71264, 71295, 'Mongolian Supplement'), (71296, 71375, 'Takri'), (71424, 71487,", "'Mro'), (92880, 92927, '<NAME>'), (92928, 93071, '<NAME>'), (93760, 93855, 'Medefaidrin'), (93952, 94111, 'Miao'),", "'Aegean Numbers'), (65856, 65935, 'Ancient Greek Numbers'), (65936, 65999, 'Ancient Symbols'), (66000, 66047,", "(67840, 67871, 'Phoenician'), (67872, 67903, 'Lydian'), (67968, 67999, 'Meroitic Hieroglyphs'), (68000, 68095, 'Meroitic", "Supplement'), (127488, 127743, 'Enclosed Ideographic Supplement'), 
(127744, 128511, 'Miscellaneous Symbols and Pictographs'), (128512,", "Mathematical Symbols-A'), (10224, 10239, 'Supplemental Arrows-A'), (10240, 10495, 'Braille Patterns'), (10496, 10623, 'Supplemental", "Jamo Extended-A'), (43392, 43487, 'Javanese'), (43488, 43519, 'Myanmar Extended-B'), (43520, 43615, 'Cham'), (43616,", "<reponame>hpi-bp1819-naumann/shift-detector # from https://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python def block(character): \"\"\" Return the Unicode block name for", "Supplement'), (1328, 1423, 'Armenian'), (1424, 1535, 'Hebrew'), (1536, 1791, 'Arabic'), (1792, 1871, 'Syriac'),", "'Cypriot Syllabary'), (67648, 67679, 'Imperial Aramaic'), (67680, 67711, 'Palmyrene'), (67712, 67759, 'Nabataean'), (67808,", "Extended'), (12736, 12783, 'CJK Strokes'), (12784, 12799, 'Katakana Phonetic Extensions'), (12800, 13055, 'Enclosed", "Technical'), (9216, 9279, 'Control Pictures'), (9280, 9311, 'Optical Character Recognition'), (9312, 9471, 'Enclosed", "Extensions Supplement'), (7616, 7679, 'Combining Diacritical Marks Supplement'), (7680, 7935, 'Latin Extended Additional'),", "11263, 'Miscellaneous Symbols and Arrows'), (11264, 11359, 'Glagolitic'), (11360, 11391, 'Latin Extended-C'), (11392,", "Turkic'), (68736, 68863, 'Old Hungarian'), (68864, 68927, '<NAME>'), (69216, 69247, 'Rumi Numeral Symbols'),", "(11520, 11567, 'Georgian Supplement'), (11568, 11647, 'Tifinagh'), (11648, 11743, 'Ethiopic Extended'), (11744, 11775,", "Shapes Extended'), (129024, 129279, 'Supplemental Arrows-C'), (129280, 129535, 'Supplemental Symbols and Pictographs'), (129536,", "7103, 'Sundanese'), (7104, 7167, 'Batak'), (7168, 7247, 'Lepcha'), (7248, 7295, 'Ol Chiki'), (7296,", "'<NAME>'), (73056, 73135, '<NAME>'), (73440, 73471, 'Makasar'), (73664, 73727, 'Tamil Supplement'), (73728, 74751,", "\"\"\" Return the Unicode block name for character, or None if character has", "(43888, 43967, 'Cherokee Supplement'), (43968, 44031, '<NAME>'), (44032, 
55215, 'Hangul Syllables'), (55216, 55295,", "917631, 'Tags'), (917760, 917999, 'Variation Selectors Supplement'), (983040, 1048575, 'Supplementary Private Use Area-A'),", "(78896, 78911, 'Egyptian Hieroglyph Format Controls'), (82944, 83583, 'Anatolian Hieroglyphs'), (92160, 92735, 'Bamum", "5023, 'Ethiopic Supplement'), (5024, 5119, 'Cherokee'), (5120, 5759, 'Unified Canadian Aboriginal Syllabics'), (5760,", "Operators'), (11008, 11263, 'Miscellaneous Symbols and Arrows'), (11264, 11359, 'Glagolitic'), (11360, 11391, 'Latin", "11647, 'Tifinagh'), (11648, 11743, 'Ethiopic Extended'), (11744, 11775, 'Cyrillic Extended-A'), (11776, 11903, 'Supplemental", "'Old South Arabian'), (68224, 68255, 'Old North Arabian'), (68288, 68351, 'Manichaean'), (68352, 68415,", "Selectors'), (65040, 65055, 'Vertical Forms'), (65056, 65071, 'Combining Half Marks'), (65072, 65103, 'CJK", "(2144, 2159, 'Syriac Supplement'), (2208, 2303, 'Arabic Extended-A'), (2304, 2431, 'Devanagari'), (2432, 2559,", "74751, 'Cuneiform'), (74752, 74879, 'Cuneiform Numbers and Punctuation'), (74880, 75087, 'Early Dynastic Cuneiform'),", "(129024, 129279, 'Supplemental Arrows-C'), (129280, 129535, 'Supplemental Symbols and Pictographs'), (129536, 129647, 'Chess", "'Hangul Jamo Extended-B'), (55296, 56191, 'High Surrogates'), (56192, 56319, 'High Private Use Surrogates'),", "(6144, 6319, 'Mongolian'), (6320, 6399, 'Unified Canadian Aboriginal Syllabics Extended'), (6400, 6479, 'Limbu'),", "72367, 'Soyombo'), (72384, 72447, '<NAME>'), (72704, 72815, 'Bhaiksuki'), (72816, 72895, 'Marchen'), (72960, 73055,", "if character has no block. 
from https://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python :param character\"\"\" assert isinstance(character, str) and", "3327, 'Kannada'), (3328, 3455, 'Malayalam'), (3456, 3583, 'Sinhala'), (3584, 3711, 'Thai'), (3712, 3839,", "(66304, 66351, 'Old Italic'), (66352, 66383, 'Gothic'), (66384, 66431, 'Old Permic'), (66432, 66463,", "(69632, 69759, 'Brahmi'), (69760, 69839, 'Kaithi'), (69840, 69887, '<NAME>'), (69888, 69967, 'Chakma'), (69968,", "(70144, 70223, 'Khojki'), (70272, 70319, 'Multani'), (70320, 70399, 'Khudawadi'), (70400, 70527, 'Grantha'), (70656,", "'CJK Unified Ideographs Extension E'), (183984, 191471, 'CJK Unified Ideographs Extension F'), (194560,", "(66688, 66735, 'Osmanya'), (66736, 66815, 'Osage'), (66816, 66863, 'Elbasan'), (66864, 66927, 'Caucasian Albanian'),", "(42240, 42559, 'Vai'), (42560, 42655, 'Cyrillic Extended-B'), (42656, 42751, 'Bamum'), (42752, 42783, 'Modifier", "(70112, 70143, 'Sinhala Archaic Numbers'), (70144, 70223, 'Khojki'), (70272, 70319, 'Multani'), (70320, 70399,", "71263, 'Modi'), (71264, 71295, 'Mongolian Supplement'), (71296, 71375, 'Takri'), (71424, 71487, 'Ahom'), (71680,", "'CJK Unified Ideographs Extension C'), (177984, 178207, 'CJK Unified Ideographs Extension D'), (178208,", "(12272, 12287, 'Ideographic Description Characters'), (12288, 12351, 'CJK Symbols and Punctuation'), (12352, 12447,", "'Osmanya'), (66736, 66815, 'Osage'), (66816, 66863, 'Elbasan'), (66864, 66927, 'Caucasian Albanian'), (67072, 67455,", "Extended-A'), (43648, 43743, 'Tai Viet'), (43744, 43775, 'Meetei Mayek Extensions'), (43776, 43823, 'Ethiopic", "(43776, 43823, 'Ethiopic Extended-A'), (43824, 43887, 'Latin Extended-E'), (43888, 43967, 'Cherokee Supplement'), (43968,", "(69216, 69247, 'Rumi Numeral Symbols'), (69376, 69423, 'Old Sogdian'), (69424, 69487, 'Sogdian'), (69600,", "'Tangut'), (100352, 101119, 'Tangut Components'), (110592, 110847, 'Kana Supplement'), (110848, 110895, 'Kana Extended-A'),", "(2208, 
2303, 'Arabic Extended-A'), (2304, 2431, 'Devanagari'), (2432, 2559, 'Bengali'), (2560, 2687, 'Gurmukhi'),", "(43264, 43311, '<NAME>'), (43312, 43359, 'Rejang'), (43360, 43391, 'Hangul Jamo Extended-A'), (43392, 43487,", "'Cyrillic'), (1280, 1327, 'Cyrillic Supplement'), (1328, 1423, 'Armenian'), (1424, 1535, 'Hebrew'), (1536, 1791,", "43487, 'Javanese'), (43488, 43519, 'Myanmar Extended-B'), (43520, 43615, 'Cham'), (43616, 43647, 'Myanmar Extended-A'),", "'Bopomofo Extended'), (12736, 12783, 'CJK Strokes'), (12784, 12799, 'Katakana Phonetic Extensions'), (12800, 13055,", "Description Characters'), (12288, 12351, 'CJK Symbols and Punctuation'), (12352, 12447, 'Hiragana'), (12448, 12543,", "128591, 'Emoticons'), (128592, 128639, 'Ornamental Dingbats'), (128640, 128767, 'Transport and Map Symbols'), (128768,", "(72384, 72447, '<NAME>'), (72704, 72815, 'Bhaiksuki'), (72816, 72895, 'Marchen'), (72960, 73055, '<NAME>'), (73056,", "40959, 'CJK Unified Ideographs'), (40960, 42127, 'Yi Syllables'), (42128, 42191, 'Yi Radicals'), (42192,", "65935, 'Ancient Greek Numbers'), (65936, 65999, 'Ancient Symbols'), (66000, 66047, 'Phaistos Disc'), (66176,", "'Unified Canadian Aboriginal Syllabics'), (5760, 5791, 'Ogham'), (5792, 5887, 'Runic'), (5888, 5919, 'Tagalog'),", "129647, 'Chess Symbols'), (129648, 129791, 'Symbols and Pictographs Extended-A'), (131072, 173791, 'CJK Unified", "122927, 'Glagolitic Supplement'), (123136, 123215, '<NAME>'), (123584, 123647, 'Wancho'), (124928, 125151, '<NAME>'), (125184,", "'Buhid'), (5984, 6015, 'Tagbanwa'), (6016, 6143, 'Khmer'), (6144, 6319, 'Mongolian'), (6320, 6399, 'Unified", "repr(character) cp = ord(character) for start, end, name in blocks: if start <=", "591, 'Latin Extended-B'), (592, 687, 'IPA Extensions'), (688, 767, 'Spacing Modifier Letters'), (768,", "(7040, 7103, 'Sundanese'), (7104, 7167, 'Batak'), (7168, 7247, 'Lepcha'), (7248, 7295, 'Ol Chiki'),", "and Pictographs'), (128512, 128591, 'Emoticons'), (128592, 128639, 
'Ornamental Dingbats'), (128640, 128767, 'Transport and", "(7376, 7423, 'Vedic Extensions'), (7424, 7551, 'Phonetic Extensions'), (7552, 7615, 'Phonetic Extensions Supplement'),", "E'), (183984, 191471, 'CJK Unified Ideographs Extension F'), (194560, 195103, 'CJK Compatibility Ideographs", "(68352, 68415, 'Avestan'), (68416, 68447, 'Inscriptional Parthian'), (68448, 68479, 'Inscriptional Pahlavi'), (68480, 68527,", "(124928, 125151, '<NAME>'), (125184, 125279, 'Adlam'), (126064, 126143, 'Indic Siyaq Numbers'), (126208, 126287,", "(1792, 1871, 'Syriac'), (1872, 1919, 'Arabic Supplement'), (1920, 1983, 'Thaana'), (1984, 2047, 'NKo'),", "4255, 'Myanmar'), (4256, 4351, 'Georgian'), (4352, 4607, 'Hang<NAME>'), (4608, 4991, 'Ethiopic'), (4992, 5023,", "(70400, 70527, 'Grantha'), (70656, 70783, 'Newa'), (70784, 70879, 'Tirhuta'), (71040, 71167, 'Siddham'), (71168,", "Compatibility Ideographs'), (64256, 64335, 'Alphabetic Presentation Forms'), (64336, 65023, 'Arabic Presentation Forms-A'), (65024,", "(12592, 12687, 'Hangul Compatibility Jamo'), (12688, 12703, 'Kanbun'), (12704, 12735, 'Bopomofo Extended'), (12736,", "Extended-A'), (43824, 43887, 'Latin Extended-E'), (43888, 43967, 'Cherokee Supplement'), (43968, 44031, '<NAME>'), (44032,", "'Takri'), (71424, 71487, 'Ahom'), (71680, 71759, 'Dogra'), (71840, 71935, '<NAME>'), (72096, 72191, 'Nandinagari'),", "(42784, 43007, 'Latin Extended-D'), (43008, 43055, 'Syloti Nagri'), (43056, 43071, 'Common Indic Number", "(67968, 67999, 'Meroitic Hieroglyphs'), (68000, 68095, 'Meroitic Cursive'), (68096, 68191, 'Kharoshthi'), (68192, 68223,", "(19904, 19967, 'Yijing Hexagram Symbols'), (19968, 40959, 'CJK Unified Ideographs'), (40960, 42127, 'Yi", "Aboriginal Syllabics'), (5760, 5791, 'Ogham'), (5792, 5887, 'Runic'), (5888, 5919, 'Tagalog'), (5920, 5951,", "5759, 'Unified Canadian Aboriginal Syllabics'), (5760, 5791, 'Ogham'), (5792, 5887, 'Runic'), (5888, 5919,", "10495, 'Braille Patterns'), (10496, 10623, 'Supplemental Arrows-B'), 
(10624, 10751, 'Miscellaneous Mathematical Symbols-B'), (10752,", "for Symbols'), (8448, 8527, 'Letterlike Symbols'), (8528, 8591, 'Number Forms'), (8592, 8703, 'Arrows'),", "(43312, 43359, 'Rejang'), (43360, 43391, 'Hangul Jamo Extended-A'), (43392, 43487, 'Javanese'), (43488, 43519,", "Extended'), (6912, 7039, 'Balinese'), (7040, 7103, 'Sundanese'), (7104, 7167, 'Batak'), (7168, 7247, 'Lepcha'),", "Use Surrogates'), (56320, 57343, 'Low Surrogates'), (57344, 63743, 'Private Use Area'), (63744, 64255," ]
[ "shortcut_list = [v for v in shortcut_dict.values() if not just_leaves or v.is_leaf] self.node_shortcuts", "# self._inverted_file = Counter(original_time_series_id) self._inverted_file = Counter() # def add_to_graph(self, parent_graph_node, graph): #", "def n_original_time_series_in_tree(self): return len(self.get_original_time_series_ids_in_tree()) @property def weight(self): w = 0 if self.n_original_time_series_in_node !=", "in cluster_centers: child_prototypes = [center] child_affinities = None child = Node(self.level + 1,", "counter else: distances = [time_series_twed(subsequence, node.center) for node in self.children] nearest_child = self.children[np.argmin(distances)]", "or clustering_threshold <= 1: clustering_threshold = 1 if level + 1 == max_level", "self.get_next_node_id(), self.get_original_time_series_ids(), clustering_threshold, weighted=self._weighted) def _populate_tree(self, db_time_series): print(\"populating tree\") print('time window') print(self.time_window) print('time", "class SubsequenceTree: def __init__(self, max_level, prototype_subsequences_list, affinities, db_time_series, time_window, time_step, clustering_threshold=1, weighted=True): self.time_window", "self.children[np.argmin(distances)] nearest_child.add_query_subsequence(subsequence) def add_db_subsequence(self, subsequence): if self.is_leaf: counter = Counter({subsequence.original_id: 1}) self._inverted_file +=", "self._weighted = weighted prototype_subsequences = np.array(prototype_subsequences_list) self._build_tree(affinities, prototype_subsequences, clustering_threshold) self._populate_tree(db_time_series) self._build_node_shorcuts() self._build_weights_vector() self._build_d_data_frame()", "in prototypes) # self._inverted_file = Counter(original_time_series_id) self._inverted_file = Counter() # def add_to_graph(self, parent_graph_node,", "= Node(0, self.max_level, prototypes, affinities, None, None, self.get_next_node_id(), self.get_original_time_series_ids(), 
clustering_threshold, weighted=self._weighted) def _populate_tree(self,", "not self.is_leaf: for child in self.children: child.add_shortcut_to_dict(shortcut_dict) @staticmethod def run_affinity_propagation(affinities): smin = np.min(affinities)", "= np.array(weights_list) def _build_d_data_frame(self, just_leaves=False): d_list = [node.d_vector for node in self.node_shortcuts] d_matrix", "prototypes[indices] child_affinities = affinities[indices][:, indices] child = Node(self.level + 1, self.max_level, child_prototypes, child_affinities,", "return result def get_db_subsequences_dict(self): def _get_db_subsequences_dict(): return self.db_subsequences_dict return _get_db_subsequences_dict def get_next_node_id(self): def", "or v.is_leaf] self.node_shortcuts = shortcut_list def _build_weights_vector(self): weights_list = [node.weight for node in", "self.query_score_chart = None self.node_shortcuts = None self.weights = None self.d_data_frame = None self._original_time_series_ids", "self._inverted_file += counter else: distances = [time_series_twed(subsequence, node.center) for node in self.children] nearest_child", "None: self._original_time_series_ids = list(self.root.inverted_file) return self._original_time_series_ids @property def n_original_time_series(self): return len(self.original_time_series_ids) @property def", "def query_vector(self): if self._query_vector is None: q_vector = np.array([node.q for node in self.node_shortcuts])", "None: timer.stop() timer.start() self._query_vector = None for subsequence in subsequences: self.root.add_query_subsequence(subsequence) if timer", "self if not self.is_leaf: for child in self.children: child.add_shortcut_to_dict(shortcut_dict) @staticmethod def run_affinity_propagation(affinities): smin", "= self.children[np.argmin(distances)] nearest_child.add_query_subsequence(subsequence) def add_db_subsequence(self, subsequence): if self.is_leaf: counter = Counter({subsequence.original_id: 1}) self._inverted_file", "not 
self.is_leaf: distances = [time_series_twed(subsequence, node.center) for node in self.children] nearest_child = self.children[np.argmin(distances)]", "and node.n_query_subsequences > 0: yield node.inverted_file.keys() def get_next_subsequence_id(self): id_ = self.next_subsequence_id self.next_subsequence_id +=", "np from sklearn.cluster import AffinityPropagation #import pydotplus as pydot from collections import Counter", "= pydot.Node(str(self)) # graph.add_node(graph_node) # if parent_graph_node is not None: # graph.add_edge(pydot.Edge(parent_graph_node, #", "def add_db_subsequence(self, subsequence): if self.is_leaf: counter = Counter({subsequence.original_id: 1}) self._inverted_file += counter else:", "len(self.get_original_time_series_ids_in_tree()) @property def weight(self): w = 0 if self.n_original_time_series_in_node != 0: w =", "axis=1) #score = 2-2*score if timer is not None: timer.stop() timer.start() order =", "is not None and len(indices) > 1: break return ap def _generate_children(self, affinities,", "cluster_centers = prototypes self._generate_children_border_case(next_node_id_getter, cluster_centers, clustering_threshold) return cluster_centers = prototypes[ap.cluster_centers_indices_] labels = ap.labels_", "children.append(child) self.children = children def add_query_subsequence(self, subsequence): self.n_query_subsequences += 1 if not self.is_leaf:", "= 0 if self.n_original_time_series_in_node != 0: w = np.log(self.n_original_time_series_in_tree/ self.n_original_time_series_in_node) try: if not", "= len(ap.cluster_centers_indices_) if indices is not None else None print(\"n_clusters = {0}\".format(n_clusters)) if", "self.next_subsequence_id self.next_subsequence_id += 1 return id_ def make_query(self, time_series, timer=None): if timer is", "ap.cluster_centers_indices_ n_clusters = len(ap.cluster_centers_indices_) if indices is not None else None print(\"n_clusters =", "in self.node_shortcuts]) q_norm = np.linalg.norm(q_vector) 
self._query_vector = q_vector / q_norm return self._query_vector @property", "prototypes) # self._inverted_file = Counter(original_time_series_id) self._inverted_file = Counter() # def add_to_graph(self, parent_graph_node, graph):", "affinities[indices][:, indices] child = Node(self.level + 1, self.max_level, child_prototypes, child_affinities, center, self, next_node_id_getter,", "= (d_matrix.T / d_norm).T d_matrix[d_matrix == np.inf] = 0 self.d_data_frame = pd.DataFrame(np.nan_to_num(d_matrix), index=self.original_time_series_ids)", "if timer is not None: timer.stop() timer.start() self._query_vector = None for subsequence in", "parent_id = parent._id if parent is not None else None print(\"-- NODE {0}", "if n_clusters is None or n_clusters == 1: cluster_centers = prototypes self._generate_children_border_case(next_node_id_getter, cluster_centers,", "self.get_original_time_series_ids_in_tree, clustering_threshold) children.append(child) self.children = children def add_query_subsequence(self, subsequence): self.n_query_subsequences += 1 if", "if timer is not None: timer.stop() timer.start() order = np.argsort(score) result = not_zero_d_dataframe.index.values[order]", "self.node_shortcuts = None self.weights = None self.d_data_frame = None self._original_time_series_ids = None self._query_vector", "series added\".format(i)) def _build_node_shorcuts(self, just_leaves=False): shortcut_dict = {} self.root.add_shortcut_to_dict(shortcut_dict) shortcut_list = [v for", "self._query_vector @property def _queried_time_series_ids(self): return list(set().union(*self._queried_time_series_ids_iterator())) def prune(self): self._build_node_shorcuts(True) self._build_weights_vector() self._build_d_data_frame() def _queried_time_series_ids_iterator(self):", "self._weighted = weighted self.max_level = max_level self.center = center self.parent = parent self.get_original_time_series_ids_in_tree", "is not None else None print(\"-- NODE {0} --\".format(self._id)) print(\"parent = 
{0}\".format(parent_id)) print(\"level", "in self.node_shortcuts] d_matrix = np.column_stack(d_list) d_norm = np.linalg.norm(d_matrix, axis=1) d_matrix = (d_matrix.T /", "max_level, prototypes, affinities, center, parent, next_node_id_getter, original_time_series_ids_getter, clustering_threshold, weighted=True): self.level = level self._weighted", "self._build_weights_vector() self._build_d_data_frame() @property def n_subsequences(self): return len(self.db_subsequences_dict) @property def original_time_series_ids(self): if self._original_time_series_ids is", "id_ def make_query(self, time_series, timer=None): if timer is not None: timer.start() subsequences =", "run_affinity_propagation(affinities): smin = np.min(affinities) smax = np.max(affinities) candidate_preferences = np.linspace(smin, smax, 10) ap", "# graph.add_node(graph_node) # if parent_graph_node is not None: # graph.add_edge(pydot.Edge(parent_graph_node, # graph_node)) #", "for subsequence in subsequences: self.root.add_query_subsequence(subsequence) if timer is not None: timer.stop() timer.start() not_zero_node_ids", "add_to_graph(self, parent_graph_node, graph): # graph_node = pydot.Node(str(self)) # graph.add_node(graph_node) # if parent_graph_node is", "self._build_d_data_frame() def _queried_time_series_ids_iterator(self): for node in self.node_shortcuts: if node.is_leaf and node.n_query_subsequences > 0:", "clustering_threshold) return cluster_centers = prototypes[ap.cluster_centers_indices_] labels = ap.labels_ children = [] for cluster_label,", "0: yield node.inverted_file.keys() def get_next_subsequence_id(self): id_ = self.next_subsequence_id self.next_subsequence_id += 1 return id_", "children def add_query_subsequence(self, subsequence): self.n_query_subsequences += 1 if not self.is_leaf: distances = [time_series_twed(subsequence,", "axis=1) d_matrix = (d_matrix.T / d_norm).T d_matrix[d_matrix == np.inf] = 0 self.d_data_frame =", "inverted_file return self._inverted_file @property def 
n_original_time_series_in_node(self): return len(self.inverted_file) @property def n_original_time_series_in_tree(self): return len(self.get_original_time_series_ids_in_tree())", "= self.n_nodes self.n_nodes += 1 return n_nodes return _get_next_node_id def get_original_time_series_ids(self): def _get_original_time_series_ids():", "self._weighted: w = 1 except AttributeError: print(\"Attribute Error caught\") print(\"weight = {0}\".format(w)) return", "if indices is not None else None print(\"n_clusters = {0}\".format(n_clusters)) if n_clusters is", "in zip(range(n_clusters), cluster_centers): indices = np.where(labels==cluster_label)[0] child_prototypes = prototypes[indices] child_affinities = affinities[indices][:, indices]", "q_norm return self._query_vector @property def _queried_time_series_ids(self): return list(set().union(*self._queried_time_series_ids_iterator())) def prune(self): self._build_node_shorcuts(True) self._build_weights_vector() self._build_d_data_frame()", "self.children is None @property def inverted_file(self): if self._inverted_file is None: inverted_file = Counter()", "pd.DataFrame(np.nan_to_num(d_matrix), index=self.original_time_series_ids) def _add_subsequence(self, subsequence): self.root.add_db_subsequence(subsequence) def calculate_inverted_files(self): return self.root.inverted_file class Node: def", "def add_to_graph(self, parent_graph_node, graph): # graph_node = pydot.Node(str(self)) # graph.add_node(graph_node) # if parent_graph_node", "1 == max_level or len(prototypes) <= clustering_threshold: self._generate_inverted_file() else: self._generate_children(affinities, next_node_id_getter, prototypes, clustering_threshold)", "candidate_preferences = np.linspace(smin, smax, 10) ap = AffinityPropagation(affinity='precomputed') for preference in candidate_preferences: ap.preference", "None self.n_nodes = 0 self._weighted = weighted prototype_subsequences = np.array(prototype_subsequences_list) self._build_tree(affinities, 
prototype_subsequences, clustering_threshold)", "prototypes, affinities, center, parent, next_node_id_getter, original_time_series_ids_getter, clustering_threshold, weighted=True): self.level = level self._weighted =", "1 if level + 1 == max_level or len(prototypes) <= clustering_threshold: self._generate_inverted_file() else:", "return self._original_time_series_ids @property def n_original_time_series(self): return len(self.original_time_series_ids) @property def query_vector(self): if self._query_vector is", "shortcut_dict = {} self.root.add_shortcut_to_dict(shortcut_dict) shortcut_list = [v for v in shortcut_dict.values() if not", "for subsequence in prototypes) # self._inverted_file = Counter(original_time_series_id) self._inverted_file = Counter() # def", "return _get_db_subsequences_dict def get_next_node_id(self): def _get_next_node_id(): n_nodes = self.n_nodes self.n_nodes += 1 return", "[node.weight for node in self.node_shortcuts] self.weights = np.array(weights_list) def _build_d_data_frame(self, just_leaves=False): d_list =", "pandas as pd class SubsequenceTree: def __init__(self, max_level, prototype_subsequences_list, affinities, db_time_series, time_window, time_step,", "1: clustering_threshold = 1 if level + 1 == max_level or len(prototypes) <=", "Error caught\") print(\"weight = {0}\".format(w)) return w @property def m_vector(self): m = np.zeros(self.n_original_time_series_in_tree)", "indices is not None and len(indices) > 1: break return ap def _generate_children(self,", "@property def original_time_series_ids(self): if self._original_time_series_ids is None: self._original_time_series_ids = list(self.root.inverted_file) return self._original_time_series_ids @property", "next_node_id_getter, self.get_original_time_series_ids_in_tree, clustering_threshold) children.append(child) self.children = children def add_query_subsequence(self, subsequence): self.n_query_subsequences += 1", "AffinityPropagation #import pydotplus as pydot from collections 
import Counter from distance_utils import time_series_twed", "= [node.weight for node in self.node_shortcuts] self.weights = np.array(weights_list) def _build_d_data_frame(self, just_leaves=False): d_list", "def _get_next_node_id(): n_nodes = self.n_nodes self.n_nodes += 1 return n_nodes return _get_next_node_id def", "def calculate_inverted_files(self): return self.root.inverted_file class Node: def __init__(self, level, max_level, prototypes, affinities, center,", "index = ids.index(key) m[index] = value return m @property def q(self): if self.n_query_subsequences", "# if self.children is not None: # for child in self.children: # child.add_to_graph(graph_node,", "= pd.DataFrame(np.nan_to_num(d_matrix), index=self.original_time_series_ids) def _add_subsequence(self, subsequence): self.root.add_db_subsequence(subsequence) def calculate_inverted_files(self): return self.root.inverted_file class Node:", "None self._inverted_file = None if clustering_threshold is None or clustering_threshold <= 1: clustering_threshold", "self.root.inverted_file class Node: def __init__(self, level, max_level, prototypes, affinities, center, parent, next_node_id_getter, original_time_series_ids_getter,", "0 if self.n_original_time_series_in_node != 0: w = np.log(self.n_original_time_series_in_tree/ self.n_original_time_series_in_node) try: if not self._weighted:", "def n_subsequences(self): return len(self.db_subsequences_dict) @property def original_time_series_ids(self): if self._original_time_series_ids is None: self._original_time_series_ids =", "child_affinities = affinities[indices][:, indices] child = Node(self.level + 1, self.max_level, child_prototypes, child_affinities, center,", "in subsequences: self.root.add_query_subsequence(subsequence) if timer is not None: timer.stop() timer.start() not_zero_node_ids = np.where(self.query_vector", "shortcut_dict.values() if not just_leaves or v.is_leaf] self.node_shortcuts = shortcut_list def _build_weights_vector(self): weights_list 
=", "None and len(indices) > 1: break return ap def _generate_children(self, affinities, next_node_id_getter, prototypes,", "# def save_graph(self): # self.generate_graph() # self.graph.write_png('graph.png') # # def generate_graph(self): # self.root.add_to_graph(None,", "= np.column_stack(d_list) d_norm = np.linalg.norm(d_matrix, axis=1) d_matrix = (d_matrix.T / d_norm).T d_matrix[d_matrix ==", "n_original_time_series(self): return len(self.original_time_series_ids) @property def query_vector(self): if self._query_vector is None: q_vector = np.array([node.q", "= original_time_series_ids_getter self._id = next_node_id_getter() parent_id = parent._id if parent is not None", "np.where(self.query_vector != 0)[0] not_zero_query_vector = self.query_vector[not_zero_node_ids] not_zero_ts_ids = self._queried_time_series_ids not_zero_d_dataframe = self.d_data_frame.loc[not_zero_ts_ids, not_zero_node_ids]", "def _build_tree(self, affinities, prototypes, clustering_threshold): self.root = Node(0, self.max_level, prototypes, affinities, None, None,", "shape = affinities.shape if affinities is not None else None print(\"affinities shape =", "None self.node_shortcuts = None self.weights = None self.d_data_frame = None self._original_time_series_ids = None", "return self.weight*self.m_vector def add_shortcut_to_dict(self, shortcut_dict): shortcut_dict[self._id] = self if not self.is_leaf: for child", "node in self.node_shortcuts: if node.is_leaf and node.n_query_subsequences > 0: yield node.inverted_file.keys() def get_next_subsequence_id(self):", "d_norm = np.linalg.norm(d_matrix, axis=1) d_matrix = (d_matrix.T / d_norm).T d_matrix[d_matrix == np.inf] =", "from distance_utils import time_series_twed import pandas as pd class SubsequenceTree: def __init__(self, max_level,", "not None: timer.start() subsequences = time_series.run_sliding_window(self.time_window, self.time_step) if timer is not None: timer.stop()", "self._build_weights_vector() self._build_d_data_frame() def 
_queried_time_series_ids_iterator(self): for node in self.node_shortcuts: if node.is_leaf and node.n_query_subsequences >", "subsequence): self.n_query_subsequences += 1 if not self.is_leaf: distances = [time_series_twed(subsequence, node.center) for node", "{0}\".format(n_clusters)) if n_clusters is None or n_clusters == 1: cluster_centers = prototypes self._generate_children_border_case(next_node_id_getter,", "print(type(db_time_series)) print(db_time_series) for i, ts in enumerate(db_time_series): print(ts) for subsequence in ts.run_sliding_window(self.time_window, self.time_step):", "@property def q(self): if self.n_query_subsequences is None: return None return self.n_query_subsequences*self.weight @property def", "None if clustering_threshold is None or clustering_threshold <= 1: clustering_threshold = 1 if", "self.query_ts = None self.query_score_chart = None self.node_shortcuts = None self.weights = None self.d_data_frame", "Counter from distance_utils import time_series_twed import pandas as pd class SubsequenceTree: def __init__(self,", "in ts.run_sliding_window(self.time_window, self.time_step): #print(subsequence) self._add_subsequence(subsequence) print(\"{0} time series added\".format(i)) def _build_node_shorcuts(self, just_leaves=False): shortcut_dict", "{0}\".format(parent_id)) print(\"level {0}\".format(level)) print(\"prototypes length = {0}\".format(len(prototypes))) shape = affinities.shape if affinities is", "> 1: break return ap def _generate_children(self, affinities, next_node_id_getter, prototypes, clustering_threshold): ap =", "self._inverted_file @property def n_original_time_series_in_node(self): return len(self.inverted_file) @property def n_original_time_series_in_tree(self): return len(self.get_original_time_series_ids_in_tree()) @property def", "distances = [time_series_twed(subsequence, node.center) for node in self.children] nearest_child = self.children[np.argmin(distances)] nearest_child.add_query_subsequence(subsequence) def", 
"def make_query(self, time_series, timer=None): if timer is not None: timer.start() subsequences = time_series.run_sliding_window(self.time_window,", "0 self._weighted = weighted prototype_subsequences = np.array(prototype_subsequences_list) self._build_tree(affinities, prototype_subsequences, clustering_threshold) self._populate_tree(db_time_series) self._build_node_shorcuts() self._build_weights_vector()", "just_leaves=False): shortcut_dict = {} self.root.add_shortcut_to_dict(shortcut_dict) shortcut_list = [v for v in shortcut_dict.values() if", "self.children = None self._inverted_file = None if clustering_threshold is None or clustering_threshold <=", "prototypes, clustering_threshold): self.root = Node(0, self.max_level, prototypes, affinities, None, None, self.get_next_node_id(), self.get_original_time_series_ids(), clustering_threshold,", "+= 1 if not self.is_leaf: distances = [time_series_twed(subsequence, node.center) for node in self.children]", "is not None else None print(\"affinities shape = {0}\".format(shape)) print(\"\") self.n_query_subsequences = 0", "preference in candidate_preferences: ap.preference = preference ap.fit(affinities) indices = ap.cluster_centers_indices_ if indices is", "node in self.node_shortcuts] self.weights = np.array(weights_list) def _build_d_data_frame(self, just_leaves=False): d_list = [node.d_vector for", "self._queried_time_series_ids not_zero_d_dataframe = self.d_data_frame.loc[not_zero_ts_ids, not_zero_node_ids] if timer is not None: timer.stop() timer.start() score", "child.inverted_file self._inverted_file = inverted_file return self._inverted_file @property def n_original_time_series_in_node(self): return len(self.inverted_file) @property def", "get_next_node_id(self): def _get_next_node_id(): n_nodes = self.n_nodes self.n_nodes += 1 return n_nodes return _get_next_node_id", "__init__(self, level, max_level, prototypes, affinities, center, parent, next_node_id_getter, original_time_series_ids_getter, 
clustering_threshold, weighted=True): self.level =", "self.d_data_frame = pd.DataFrame(np.nan_to_num(d_matrix), index=self.original_time_series_ids) def _add_subsequence(self, subsequence): self.root.add_db_subsequence(subsequence) def calculate_inverted_files(self): return self.root.inverted_file class", "subsequence): if self.is_leaf: counter = Counter({subsequence.original_id: 1}) self._inverted_file += counter else: distances =", "self.node_shortcuts = shortcut_list def _build_weights_vector(self): weights_list = [node.weight for node in self.node_shortcuts] self.weights", "nearest_child.add_query_subsequence(subsequence) def add_db_subsequence(self, subsequence): if self.is_leaf: counter = Counter({subsequence.original_id: 1}) self._inverted_file += counter", "None @property def inverted_file(self): if self._inverted_file is None: inverted_file = Counter() for child", "score = -np.sum(not_zero_query_vector*not_zero_d_dataframe.values, axis=1) #score = 2-2*score if timer is not None: timer.stop()", "= list(self.root.inverted_file) return self._original_time_series_ids @property def n_original_time_series(self): return len(self.original_time_series_ids) @property def query_vector(self): if", "def __init__(self, level, max_level, prototypes, affinities, center, parent, next_node_id_getter, original_time_series_ids_getter, clustering_threshold, weighted=True): self.level", "+= counter else: distances = [time_series_twed(subsequence, node.center) for node in self.children] nearest_child =", "not_zero_d_dataframe = self.d_data_frame.loc[not_zero_ts_ids, not_zero_node_ids] if timer is not None: timer.stop() timer.start() score =", "id_ = self.next_subsequence_id self.next_subsequence_id += 1 return id_ def make_query(self, time_series, timer=None): if", "= next_node_id_getter() parent_id = parent._id if parent is not None else None print(\"--", "self.query_vector[not_zero_node_ids] not_zero_ts_ids = self._queried_time_series_ids not_zero_d_dataframe = 
self.d_data_frame.loc[not_zero_ts_ids, not_zero_node_ids] if timer is not None:", "child_affinities, center, self, next_node_id_getter, self.get_original_time_series_ids_in_tree, clustering_threshold) children.append(child) self.children = children def add_query_subsequence(self, subsequence):", "self._inverted_file is None: inverted_file = Counter() for child in self.children: inverted_file += child.inverted_file", "if not self._weighted: w = 1 except AttributeError: print(\"Attribute Error caught\") print(\"weight =", "None: q_vector = np.array([node.q for node in self.node_shortcuts]) q_norm = np.linalg.norm(q_vector) self._query_vector =", "0 self.children = None self._inverted_file = None if clustering_threshold is None or clustering_threshold", "prototypes self._generate_children_border_case(next_node_id_getter, cluster_centers, clustering_threshold) return cluster_centers = prototypes[ap.cluster_centers_indices_] labels = ap.labels_ children =", "timer is not None: timer.stop() timer.start() score = -np.sum(not_zero_query_vector*not_zero_d_dataframe.values, axis=1) #score = 2-2*score", "= value return m @property def q(self): if self.n_query_subsequences is None: return None", "inverted_file += child.inverted_file self._inverted_file = inverted_file return self._inverted_file @property def n_original_time_series_in_node(self): return len(self.inverted_file)", "self._build_node_shorcuts(True) self._build_weights_vector() self._build_d_data_frame() def _queried_time_series_ids_iterator(self): for node in self.node_shortcuts: if node.is_leaf and node.n_query_subsequences", "self.level = level self._weighted = weighted self.max_level = max_level self.center = center self.parent", "# if parent_graph_node is not None: # graph.add_edge(pydot.Edge(parent_graph_node, # graph_node)) # if self.children", "original_time_series_ids_getter self._id = next_node_id_getter() parent_id = parent._id if parent is not None else", "self._inverted_file = 
Counter(original_time_series_id) self._inverted_file = Counter() # def add_to_graph(self, parent_graph_node, graph): # graph_node", "#score = 2-2*score if timer is not None: timer.stop() timer.start() order = np.argsort(score)", "#import pydotplus as pydot from collections import Counter from distance_utils import time_series_twed import", "= self._queried_time_series_ids not_zero_d_dataframe = self.d_data_frame.loc[not_zero_ts_ids, not_zero_node_ids] if timer is not None: timer.stop() timer.start()", "self.n_original_time_series_in_node) try: if not self._weighted: w = 1 except AttributeError: print(\"Attribute Error caught\")", "length = {0}\".format(len(prototypes))) shape = affinities.shape if affinities is not None else None", "self._id = next_node_id_getter() parent_id = parent._id if parent is not None else None", "@property def n_original_time_series_in_node(self): return len(self.inverted_file) @property def n_original_time_series_in_tree(self): return len(self.get_original_time_series_ids_in_tree()) @property def weight(self):", "value in self.inverted_file.items(): index = ids.index(key) m[index] = value return m @property def", "w = 0 if self.n_original_time_series_in_node != 0: w = np.log(self.n_original_time_series_in_tree/ self.n_original_time_series_in_node) try: if", "weights_list = [node.weight for node in self.node_shortcuts] self.weights = np.array(weights_list) def _build_d_data_frame(self, just_leaves=False):", "Node: def __init__(self, level, max_level, prototypes, affinities, center, parent, next_node_id_getter, original_time_series_ids_getter, clustering_threshold, weighted=True):", "add_query_subsequence(self, subsequence): self.n_query_subsequences += 1 if not self.is_leaf: distances = [time_series_twed(subsequence, node.center) for", "= np.linalg.norm(q_vector) self._query_vector = q_vector / q_norm return self._query_vector @property def _queried_time_series_ids(self): return", "+= 1 return id_ def make_query(self, time_series, 
timer=None): if timer is not None:", "np.column_stack(d_list) d_norm = np.linalg.norm(d_matrix, axis=1) d_matrix = (d_matrix.T / d_norm).T d_matrix[d_matrix == np.inf]", "if clustering_threshold is None or clustering_threshold <= 1: clustering_threshold = 1 if level", "self._inverted_file = inverted_file return self._inverted_file @property def n_original_time_series_in_node(self): return len(self.inverted_file) @property def n_original_time_series_in_tree(self):", "node in self.children] nearest_child = self.children[np.argmin(distances)] nearest_child.add_query_subsequence(subsequence) def add_db_subsequence(self, subsequence): if self.is_leaf: counter", "try: if not self._weighted: w = 1 except AttributeError: print(\"Attribute Error caught\") print(\"weight", "pydotplus as pydot from collections import Counter from distance_utils import time_series_twed import pandas", "= {0}\".format(shape)) print(\"\") self.n_query_subsequences = 0 self.children = None self._inverted_file = None if", "= [time_series_twed(subsequence, node.center) for node in self.children] nearest_child = self.children[np.argmin(distances)] nearest_child.add_db_subsequence(subsequence) def _generate_inverted_file(self):", "= None self.d_data_frame = None self._original_time_series_ids = None self._query_vector = None self.n_nodes =", "collections import Counter from distance_utils import time_series_twed import pandas as pd class SubsequenceTree:", "for center in cluster_centers: child_prototypes = [center] child_affinities = None child = Node(self.level", "time_series, timer=None): if timer is not None: timer.start() subsequences = time_series.run_sliding_window(self.time_window, self.time_step) if", "is None: self._original_time_series_ids = list(self.root.inverted_file) return self._original_time_series_ids @property def n_original_time_series(self): return len(self.original_time_series_ids) @property", "get_db_subsequences_dict(self): def _get_db_subsequences_dict(): return 
self.db_subsequences_dict return _get_db_subsequences_dict def get_next_node_id(self): def _get_next_node_id(): n_nodes =", "if timer is not None: timer.stop() timer.start() not_zero_node_ids = np.where(self.query_vector != 0)[0] not_zero_query_vector", "node in self.node_shortcuts] d_matrix = np.column_stack(d_list) d_norm = np.linalg.norm(d_matrix, axis=1) d_matrix = (d_matrix.T", "1: cluster_centers = prototypes self._generate_children_border_case(next_node_id_getter, cluster_centers, clustering_threshold) return cluster_centers = prototypes[ap.cluster_centers_indices_] labels =", "= prototypes[ap.cluster_centers_indices_] labels = ap.labels_ children = [] for cluster_label, center in zip(range(n_clusters),", "None self._original_time_series_ids = None self._query_vector = None self.n_nodes = 0 self._weighted = weighted", "m @property def q(self): if self.n_query_subsequences is None: return None return self.n_query_subsequences*self.weight @property", "next_node_id_getter() parent_id = parent._id if parent is not None else None print(\"-- NODE", "--\".format(self._id)) print(\"parent = {0}\".format(parent_id)) print(\"level {0}\".format(level)) print(\"prototypes length = {0}\".format(len(prototypes))) shape = affinities.shape", "= (subsequence.original_id # for subsequence in prototypes) # self._inverted_file = Counter(original_time_series_id) self._inverted_file =", "self._query_vector = q_vector / q_norm return self._query_vector @property def _queried_time_series_ids(self): return list(set().union(*self._queried_time_series_ids_iterator())) def", "self.max_level, prototypes, affinities, None, None, self.get_next_node_id(), self.get_original_time_series_ids(), clustering_threshold, weighted=self._weighted) def _populate_tree(self, db_time_series): print(\"populating", "return len(self.get_original_time_series_ids_in_tree()) @property def weight(self): w = 0 if self.n_original_time_series_in_node != 0: w", "clustering_threshold = 1 if level + 1 == max_level 
or len(prototypes) <= clustering_threshold:", "child_affinities = None child = Node(self.level + 1, self.max_level, child_prototypes, child_affinities, center, self,", "clustering_threshold <= 1: clustering_threshold = 1 if level + 1 == max_level or", "m[index] = value return m @property def q(self): if self.n_query_subsequences is None: return", "max_level, prototype_subsequences_list, affinities, db_time_series, time_window, time_step, clustering_threshold=1, weighted=True): self.time_window = time_window self.time_step =", "None for subsequence in subsequences: self.root.add_query_subsequence(subsequence) if timer is not None: timer.stop() timer.start()", "subsequence): self.root.add_db_subsequence(subsequence) def calculate_inverted_files(self): return self.root.inverted_file class Node: def __init__(self, level, max_level, prototypes,", "d_norm).T d_matrix[d_matrix == np.inf] = 0 self.d_data_frame = pd.DataFrame(np.nan_to_num(d_matrix), index=self.original_time_series_ids) def _add_subsequence(self, subsequence):", "_build_d_data_frame(self, just_leaves=False): d_list = [node.d_vector for node in self.node_shortcuts] d_matrix = np.column_stack(d_list) d_norm", "w = 1 except AttributeError: print(\"Attribute Error caught\") print(\"weight = {0}\".format(w)) return w", "self._original_time_series_ids = list(self.root.inverted_file) return self._original_time_series_ids @property def n_original_time_series(self): return len(self.original_time_series_ids) @property def query_vector(self):", "np.argsort(score) result = not_zero_d_dataframe.index.values[order] if timer is not None: timer.stop() return result def", "<= 1: clustering_threshold = 1 if level + 1 == max_level or len(prototypes)", "time_window, time_step, clustering_threshold=1, weighted=True): self.time_window = time_window self.time_step = time_step self.max_level = max_level", "is not None: timer.start() subsequences = time_series.run_sliding_window(self.time_window, self.time_step) if timer is not 
None:", "self.children: inverted_file += child.inverted_file self._inverted_file = inverted_file return self._inverted_file @property def n_original_time_series_in_node(self): return", "return self.original_time_series_ids return _get_original_time_series_ids # def save_graph(self): # self.generate_graph() # self.graph.write_png('graph.png') # #", "time_step, clustering_threshold=1, weighted=True): self.time_window = time_window self.time_step = time_step self.max_level = max_level #self.graph", "= time_step self.max_level = max_level #self.graph = pydot.Dot(graph_type='graph') self.query_ts = None self.query_score_chart =", "= 1 if level + 1 == max_level or len(prototypes) <= clustering_threshold: self._generate_inverted_file()", "print(\"{0} time series added\".format(i)) def _build_node_shorcuts(self, just_leaves=False): shortcut_dict = {} self.root.add_shortcut_to_dict(shortcut_dict) shortcut_list =", "= Node(self.level + 1, self.max_level, child_prototypes, child_affinities, center, self, next_node_id_getter, self.get_original_time_series_ids_in_tree, clustering_threshold) children.append(child)", "graph.add_node(graph_node) # if parent_graph_node is not None: # graph.add_edge(pydot.Edge(parent_graph_node, # graph_node)) # if", "clustering_threshold: self._generate_inverted_file() else: self._generate_children(affinities, next_node_id_getter, prototypes, clustering_threshold) @property def is_leaf(self): return self.children is", "m = np.zeros(self.n_original_time_series_in_tree) ids = self.get_original_time_series_ids_in_tree() for key, value in self.inverted_file.items(): index =", "= [node.d_vector for node in self.node_shortcuts] d_matrix = np.column_stack(d_list) d_norm = np.linalg.norm(d_matrix, axis=1)", "def prune(self): self._build_node_shorcuts(True) self._build_weights_vector() self._build_d_data_frame() def _queried_time_series_ids_iterator(self): for node in self.node_shortcuts: if node.is_leaf", "= {0}\".format(w)) return w @property def 
m_vector(self): m = np.zeros(self.n_original_time_series_in_tree) ids = self.get_original_time_series_ids_in_tree()", "= self.query_vector[not_zero_node_ids] not_zero_ts_ids = self._queried_time_series_ids not_zero_d_dataframe = self.d_data_frame.loc[not_zero_ts_ids, not_zero_node_ids] if timer is not", "[time_series_twed(subsequence, node.center) for node in self.children] nearest_child = self.children[np.argmin(distances)] nearest_child.add_db_subsequence(subsequence) def _generate_inverted_file(self): #", "None: # graph.add_edge(pydot.Edge(parent_graph_node, # graph_node)) # if self.children is not None: # for", "print(self.time_window) print('time step') print(self.time_step) print(type(db_time_series)) print(db_time_series) for i, ts in enumerate(db_time_series): print(ts) for", "not None and len(indices) > 1: break return ap def _generate_children(self, affinities, next_node_id_getter,", "_get_db_subsequences_dict def get_next_node_id(self): def _get_next_node_id(): n_nodes = self.n_nodes self.n_nodes += 1 return n_nodes", "tree\") print('time window') print(self.time_window) print('time step') print(self.time_step) print(type(db_time_series)) print(db_time_series) for i, ts in", "# def add_to_graph(self, parent_graph_node, graph): # graph_node = pydot.Node(str(self)) # graph.add_node(graph_node) # if", "w = np.log(self.n_original_time_series_in_tree/ self.n_original_time_series_in_node) try: if not self._weighted: w = 1 except AttributeError:", "pydot.Node(str(self)) # graph.add_node(graph_node) # if parent_graph_node is not None: # graph.add_edge(pydot.Edge(parent_graph_node, # graph_node))", "2-2*score if timer is not None: timer.stop() timer.start() order = np.argsort(score) result =", "not_zero_ts_ids = self._queried_time_series_ids not_zero_d_dataframe = self.d_data_frame.loc[not_zero_ts_ids, not_zero_node_ids] if timer is not None: timer.stop()", "preference ap.fit(affinities) indices = ap.cluster_centers_indices_ if indices is not None and 
len(indices) >", "n_clusters == 1: cluster_centers = prototypes self._generate_children_border_case(next_node_id_getter, cluster_centers, clustering_threshold) return cluster_centers = prototypes[ap.cluster_centers_indices_]", "else: distances = [time_series_twed(subsequence, node.center) for node in self.children] nearest_child = self.children[np.argmin(distances)] nearest_child.add_db_subsequence(subsequence)", "print(ts) for subsequence in ts.run_sliding_window(self.time_window, self.time_step): #print(subsequence) self._add_subsequence(subsequence) print(\"{0} time series added\".format(i)) def", "not None: timer.stop() timer.start() for node in self.node_shortcuts: node.n_query_subsequences = 0 if timer", "return self.root.inverted_file class Node: def __init__(self, level, max_level, prototypes, affinities, center, parent, next_node_id_getter,", "shortcut_dict[self._id] = self if not self.is_leaf: for child in self.children: child.add_shortcut_to_dict(shortcut_dict) @staticmethod def", "self.parent = parent self.get_original_time_series_ids_in_tree = original_time_series_ids_getter self._id = next_node_id_getter() parent_id = parent._id if", "[] for center in cluster_centers: child_prototypes = [center] child_affinities = None child =", "self.root.add_shortcut_to_dict(shortcut_dict) shortcut_list = [v for v in shortcut_dict.values() if not just_leaves or v.is_leaf]", "self.root = Node(0, self.max_level, prototypes, affinities, None, None, self.get_next_node_id(), self.get_original_time_series_ids(), clustering_threshold, weighted=self._weighted) def", "is None: return None return self.n_query_subsequences*self.weight @property def d_vector(self): return self.weight*self.m_vector def add_shortcut_to_dict(self,", "original_time_series_ids_getter, clustering_threshold, weighted=True): self.level = level self._weighted = weighted self.max_level = max_level self.center", "time_series_twed import pandas as pd class SubsequenceTree: def __init__(self, max_level, 
prototype_subsequences_list, affinities, db_time_series,", "= prototypes self._generate_children_border_case(next_node_id_getter, cluster_centers, clustering_threshold) return cluster_centers = prototypes[ap.cluster_centers_indices_] labels = ap.labels_ children", "= Counter() for child in self.children: inverted_file += child.inverted_file self._inverted_file = inverted_file return", "AffinityPropagation(affinity='precomputed') for preference in candidate_preferences: ap.preference = preference ap.fit(affinities) indices = ap.cluster_centers_indices_ if", "None return self.n_query_subsequences*self.weight @property def d_vector(self): return self.weight*self.m_vector def add_shortcut_to_dict(self, shortcut_dict): shortcut_dict[self._id] =", "for key, value in self.inverted_file.items(): index = ids.index(key) m[index] = value return m", "return m @property def q(self): if self.n_query_subsequences is None: return None return self.n_query_subsequences*self.weight", "= inverted_file return self._inverted_file @property def n_original_time_series_in_node(self): return len(self.inverted_file) @property def n_original_time_series_in_tree(self): return", "timer=None): if timer is not None: timer.start() subsequences = time_series.run_sliding_window(self.time_window, self.time_step) if timer", "time_window self.time_step = time_step self.max_level = max_level #self.graph = pydot.Dot(graph_type='graph') self.query_ts = None", "not None: timer.stop() timer.start() not_zero_node_ids = np.where(self.query_vector != 0)[0] not_zero_query_vector = self.query_vector[not_zero_node_ids] not_zero_ts_ids", "caught\") print(\"weight = {0}\".format(w)) return w @property def m_vector(self): m = np.zeros(self.n_original_time_series_in_tree) ids", "#print(subsequence) self._add_subsequence(subsequence) print(\"{0} time series added\".format(i)) def _build_node_shorcuts(self, just_leaves=False): shortcut_dict = {} self.root.add_shortcut_to_dict(shortcut_dict)", "print(\"Attribute Error 
caught\") print(\"weight = {0}\".format(w)) return w @property def m_vector(self): m =", "center self.parent = parent self.get_original_time_series_ids_in_tree = original_time_series_ids_getter self._id = next_node_id_getter() parent_id = parent._id", "prototype_subsequences_list, affinities, db_time_series, time_window, time_step, clustering_threshold=1, weighted=True): self.time_window = time_window self.time_step = time_step", "= time_series.run_sliding_window(self.time_window, self.time_step) if timer is not None: timer.stop() timer.start() for node in", "#self.graph = pydot.Dot(graph_type='graph') self.query_ts = None self.query_score_chart = None self.node_shortcuts = None self.weights", "n_clusters = len(ap.cluster_centers_indices_) if indices is not None else None print(\"n_clusters = {0}\".format(n_clusters))", "<= clustering_threshold: self._generate_inverted_file() else: self._generate_children(affinities, next_node_id_getter, prototypes, clustering_threshold) @property def is_leaf(self): return self.children", "d_matrix = (d_matrix.T / d_norm).T d_matrix[d_matrix == np.inf] = 0 self.d_data_frame = pd.DataFrame(np.nan_to_num(d_matrix),", "get_next_subsequence_id(self): id_ = self.next_subsequence_id self.next_subsequence_id += 1 return id_ def make_query(self, time_series, timer=None):", "subsequence in prototypes) # self._inverted_file = Counter(original_time_series_id) self._inverted_file = Counter() # def add_to_graph(self,", "graph_node = pydot.Node(str(self)) # graph.add_node(graph_node) # if parent_graph_node is not None: # graph.add_edge(pydot.Edge(parent_graph_node,", "as np from sklearn.cluster import AffinityPropagation #import pydotplus as pydot from collections import", "center, self, next_node_id_getter, self.get_original_time_series_ids_in_tree, clustering_threshold) children.append(child) self.children = children def _generate_children_border_case(self, next_node_id_getter, cluster_centers,", "if timer is not None: timer.stop() 
timer.start() for node in self.node_shortcuts: node.n_query_subsequences =", "= parent self.get_original_time_series_ids_in_tree = original_time_series_ids_getter self._id = next_node_id_getter() parent_id = parent._id if parent", "{0}\".format(shape)) print(\"\") self.n_query_subsequences = 0 self.children = None self._inverted_file = None if clustering_threshold", "0 if timer is not None: timer.stop() timer.start() self._query_vector = None for subsequence", "clustering_threshold): ap = self.run_affinity_propagation(affinities) indices = ap.cluster_centers_indices_ n_clusters = len(ap.cluster_centers_indices_) if indices is", "self.run_affinity_propagation(affinities) indices = ap.cluster_centers_indices_ n_clusters = len(ap.cluster_centers_indices_) if indices is not None else", "timer.stop() timer.start() for node in self.node_shortcuts: node.n_query_subsequences = 0 if timer is not", "10) ap = AffinityPropagation(affinity='precomputed') for preference in candidate_preferences: ap.preference = preference ap.fit(affinities) indices", "n_nodes = self.n_nodes self.n_nodes += 1 return n_nodes return _get_next_node_id def get_original_time_series_ids(self): def", "1: break return ap def _generate_children(self, affinities, next_node_id_getter, prototypes, clustering_threshold): ap = self.run_affinity_propagation(affinities)", "get_original_time_series_ids(self): def _get_original_time_series_ids(): return self.original_time_series_ids return _get_original_time_series_ids # def save_graph(self): # self.generate_graph() #", "children = [] for center in cluster_centers: child_prototypes = [center] child_affinities = None", "children def _generate_children_border_case(self, next_node_id_getter, cluster_centers, clustering_threshold): children = [] for center in cluster_centers:", "ids = self.get_original_time_series_ids_in_tree() for key, value in self.inverted_file.items(): index = ids.index(key) m[index] =", "= preference ap.fit(affinities) indices = 
ap.cluster_centers_indices_ if indices is not None and len(indices)", "self._inverted_file = Counter() # def add_to_graph(self, parent_graph_node, graph): # graph_node = pydot.Node(str(self)) #", "smax, 10) ap = AffinityPropagation(affinity='precomputed') for preference in candidate_preferences: ap.preference = preference ap.fit(affinities)", "for child in self.children: inverted_file += child.inverted_file self._inverted_file = inverted_file return self._inverted_file @property", "return len(self.inverted_file) @property def n_original_time_series_in_tree(self): return len(self.get_original_time_series_ids_in_tree()) @property def weight(self): w = 0", "<filename>subsequence_tree.py import numpy as np from sklearn.cluster import AffinityPropagation #import pydotplus as pydot", "time_step self.max_level = max_level #self.graph = pydot.Dot(graph_type='graph') self.query_ts = None self.query_score_chart = None", "self.weight*self.m_vector def add_shortcut_to_dict(self, shortcut_dict): shortcut_dict[self._id] = self if not self.is_leaf: for child in", "self.get_original_time_series_ids_in_tree, clustering_threshold) children.append(child) self.children = children def _generate_children_border_case(self, next_node_id_getter, cluster_centers, clustering_threshold): children =", "@property def is_leaf(self): return self.children is None @property def inverted_file(self): if self._inverted_file is", "index=self.original_time_series_ids) def _add_subsequence(self, subsequence): self.root.add_db_subsequence(subsequence) def calculate_inverted_files(self): return self.root.inverted_file class Node: def __init__(self,", "child_affinities, center, self, next_node_id_getter, self.get_original_time_series_ids_in_tree, clustering_threshold) children.append(child) self.children = children def _generate_children_border_case(self, next_node_id_getter,", "original_time_series_ids(self): if self._original_time_series_ids is None: self._original_time_series_ids = 
list(self.root.inverted_file) return self._original_time_series_ids @property def n_original_time_series(self):", "result def get_db_subsequences_dict(self): def _get_db_subsequences_dict(): return self.db_subsequences_dict return _get_db_subsequences_dict def get_next_node_id(self): def _get_next_node_id():", "def weight(self): w = 0 if self.n_original_time_series_in_node != 0: w = np.log(self.n_original_time_series_in_tree/ self.n_original_time_series_in_node)", "= np.min(affinities) smax = np.max(affinities) candidate_preferences = np.linspace(smin, smax, 10) ap = AffinityPropagation(affinity='precomputed')", "_build_tree(self, affinities, prototypes, clustering_threshold): self.root = Node(0, self.max_level, prototypes, affinities, None, None, self.get_next_node_id(),", "if self._query_vector is None: q_vector = np.array([node.q for node in self.node_shortcuts]) q_norm =", "self.max_level, child_prototypes, child_affinities, center, self, next_node_id_getter, self.get_original_time_series_ids_in_tree, clustering_threshold) children.append(child) self.children = children def", "self.children = children def _generate_children_border_case(self, next_node_id_getter, cluster_centers, clustering_threshold): children = [] for center", "if parent_graph_node is not None: # graph.add_edge(pydot.Edge(parent_graph_node, # graph_node)) # if self.children is", "None: timer.stop() return result def get_db_subsequences_dict(self): def _get_db_subsequences_dict(): return self.db_subsequences_dict return _get_db_subsequences_dict def", "self.children[np.argmin(distances)] nearest_child.add_db_subsequence(subsequence) def _generate_inverted_file(self): # original_time_series_id = (subsequence.original_id # for subsequence in prototypes)", "/ q_norm return self._query_vector @property def _queried_time_series_ids(self): return list(set().union(*self._queried_time_series_ids_iterator())) def prune(self): self._build_node_shorcuts(True) self._build_weights_vector()", "= None if 
clustering_threshold is None or clustering_threshold <= 1: clustering_threshold = 1", "self._build_node_shorcuts() self._build_weights_vector() self._build_d_data_frame() @property def n_subsequences(self): return len(self.db_subsequences_dict) @property def original_time_series_ids(self): if self._original_time_series_ids", "print(self.time_step) print(type(db_time_series)) print(db_time_series) for i, ts in enumerate(db_time_series): print(ts) for subsequence in ts.run_sliding_window(self.time_window,", "1}) self._inverted_file += counter else: distances = [time_series_twed(subsequence, node.center) for node in self.children]", "is not None: timer.stop() timer.start() score = -np.sum(not_zero_query_vector*not_zero_d_dataframe.values, axis=1) #score = 2-2*score if", "prune(self): self._build_node_shorcuts(True) self._build_weights_vector() self._build_d_data_frame() def _queried_time_series_ids_iterator(self): for node in self.node_shortcuts: if node.is_leaf and", "node in self.node_shortcuts]) q_norm = np.linalg.norm(q_vector) self._query_vector = q_vector / q_norm return self._query_vector", "= 0 self.children = None self._inverted_file = None if clustering_threshold is None or", "timer is not None: timer.stop() timer.start() order = np.argsort(score) result = not_zero_d_dataframe.index.values[order] if", "cluster_label, center in zip(range(n_clusters), cluster_centers): indices = np.where(labels==cluster_label)[0] child_prototypes = prototypes[indices] child_affinities =", "self._inverted_file = None if clustering_threshold is None or clustering_threshold <= 1: clustering_threshold =", "= {0}\".format(len(prototypes))) shape = affinities.shape if affinities is not None else None print(\"affinities", "from collections import Counter from distance_utils import time_series_twed import pandas as pd class", "self.d_data_frame.loc[not_zero_ts_ids, not_zero_node_ids] if timer is not None: timer.stop() timer.start() score = 
-np.sum(not_zero_query_vector*not_zero_d_dataframe.values, axis=1)", "= {0}\".format(n_clusters)) if n_clusters is None or n_clusters == 1: cluster_centers = prototypes", "self.graph.write_png('graph.png') # # def generate_graph(self): # self.root.add_to_graph(None, self.graph) def _build_tree(self, affinities, prototypes, clustering_threshold):", "# def generate_graph(self): # self.root.add_to_graph(None, self.graph) def _build_tree(self, affinities, prototypes, clustering_threshold): self.root =", "node.inverted_file.keys() def get_next_subsequence_id(self): id_ = self.next_subsequence_id self.next_subsequence_id += 1 return id_ def make_query(self,", "= np.array([node.q for node in self.node_shortcuts]) q_norm = np.linalg.norm(q_vector) self._query_vector = q_vector /", "i, ts in enumerate(db_time_series): print(ts) for subsequence in ts.run_sliding_window(self.time_window, self.time_step): #print(subsequence) self._add_subsequence(subsequence) print(\"{0}", "return self.n_query_subsequences*self.weight @property def d_vector(self): return self.weight*self.m_vector def add_shortcut_to_dict(self, shortcut_dict): shortcut_dict[self._id] = self", "for node in self.node_shortcuts: node.n_query_subsequences = 0 if timer is not None: timer.stop()", "ap.labels_ children = [] for cluster_label, center in zip(range(n_clusters), cluster_centers): indices = np.where(labels==cluster_label)[0]", "return list(set().union(*self._queried_time_series_ids_iterator())) def prune(self): self._build_node_shorcuts(True) self._build_weights_vector() self._build_d_data_frame() def _queried_time_series_ids_iterator(self): for node in self.node_shortcuts:", "not None else None print(\"affinities shape = {0}\".format(shape)) print(\"\") self.n_query_subsequences = 0 self.children", "== max_level or len(prototypes) <= clustering_threshold: self._generate_inverted_file() else: self._generate_children(affinities, next_node_id_getter, prototypes, clustering_threshold) @property", "level, 
max_level, prototypes, affinities, center, parent, next_node_id_getter, original_time_series_ids_getter, clustering_threshold, weighted=True): self.level = level", "for node in self.children] nearest_child = self.children[np.argmin(distances)] nearest_child.add_query_subsequence(subsequence) def add_db_subsequence(self, subsequence): if self.is_leaf:", "@property def d_vector(self): return self.weight*self.m_vector def add_shortcut_to_dict(self, shortcut_dict): shortcut_dict[self._id] = self if not", "class Node: def __init__(self, level, max_level, prototypes, affinities, center, parent, next_node_id_getter, original_time_series_ids_getter, clustering_threshold,", "+= child.inverted_file self._inverted_file = inverted_file return self._inverted_file @property def n_original_time_series_in_node(self): return len(self.inverted_file) @property", "if not just_leaves or v.is_leaf] self.node_shortcuts = shortcut_list def _build_weights_vector(self): weights_list = [node.weight", "/ d_norm).T d_matrix[d_matrix == np.inf] = 0 self.d_data_frame = pd.DataFrame(np.nan_to_num(d_matrix), index=self.original_time_series_ids) def _add_subsequence(self,", "= None for subsequence in subsequences: self.root.add_query_subsequence(subsequence) if timer is not None: timer.stop()", "len(prototypes) <= clustering_threshold: self._generate_inverted_file() else: self._generate_children(affinities, next_node_id_getter, prototypes, clustering_threshold) @property def is_leaf(self): return", "not self._weighted: w = 1 except AttributeError: print(\"Attribute Error caught\") print(\"weight = {0}\".format(w))", "timer.stop() timer.start() not_zero_node_ids = np.where(self.query_vector != 0)[0] not_zero_query_vector = self.query_vector[not_zero_node_ids] not_zero_ts_ids = self._queried_time_series_ids", "yield node.inverted_file.keys() def get_next_subsequence_id(self): id_ = self.next_subsequence_id self.next_subsequence_id += 1 return id_ def", "cluster_centers = 
prototypes[ap.cluster_centers_indices_] labels = ap.labels_ children = [] for cluster_label, center in", "None: timer.stop() timer.start() order = np.argsort(score) result = not_zero_d_dataframe.index.values[order] if timer is not", "if self._inverted_file is None: inverted_file = Counter() for child in self.children: inverted_file +=", "1 return id_ def make_query(self, time_series, timer=None): if timer is not None: timer.start()", "for subsequence in ts.run_sliding_window(self.time_window, self.time_step): #print(subsequence) self._add_subsequence(subsequence) print(\"{0} time series added\".format(i)) def _build_node_shorcuts(self,", "affinities, next_node_id_getter, prototypes, clustering_threshold): ap = self.run_affinity_propagation(affinities) indices = ap.cluster_centers_indices_ n_clusters = len(ap.cluster_centers_indices_)", "def run_affinity_propagation(affinities): smin = np.min(affinities) smax = np.max(affinities) candidate_preferences = np.linspace(smin, smax, 10)", "ap.preference = preference ap.fit(affinities) indices = ap.cluster_centers_indices_ if indices is not None and", "= None self.n_nodes = 0 self._weighted = weighted prototype_subsequences = np.array(prototype_subsequences_list) self._build_tree(affinities, prototype_subsequences,", "save_graph(self): # self.generate_graph() # self.graph.write_png('graph.png') # # def generate_graph(self): # self.root.add_to_graph(None, self.graph) def", "return self.children is None @property def inverted_file(self): if self._inverted_file is None: inverted_file =", "center in cluster_centers: child_prototypes = [center] child_affinities = None child = Node(self.level +", "{} self.root.add_shortcut_to_dict(shortcut_dict) shortcut_list = [v for v in shortcut_dict.values() if not just_leaves or", "smin = np.min(affinities) smax = np.max(affinities) candidate_preferences = np.linspace(smin, smax, 10) ap =", "d_list = [node.d_vector for node in self.node_shortcuts] d_matrix = np.column_stack(d_list) d_norm = 
np.linalg.norm(d_matrix,", "weighted prototype_subsequences = np.array(prototype_subsequences_list) self._build_tree(affinities, prototype_subsequences, clustering_threshold) self._populate_tree(db_time_series) self._build_node_shorcuts() self._build_weights_vector() self._build_d_data_frame() @property def", "self.graph) def _build_tree(self, affinities, prototypes, clustering_threshold): self.root = Node(0, self.max_level, prototypes, affinities, None,", "not None: timer.stop() timer.start() self._query_vector = None for subsequence in subsequences: self.root.add_query_subsequence(subsequence) if", "= Counter() # def add_to_graph(self, parent_graph_node, graph): # graph_node = pydot.Node(str(self)) # graph.add_node(graph_node)", "db_time_series, time_window, time_step, clustering_threshold=1, weighted=True): self.time_window = time_window self.time_step = time_step self.max_level =", "None: timer.stop() timer.start() not_zero_node_ids = np.where(self.query_vector != 0)[0] not_zero_query_vector = self.query_vector[not_zero_node_ids] not_zero_ts_ids =", "result = not_zero_d_dataframe.index.values[order] if timer is not None: timer.stop() return result def get_db_subsequences_dict(self):", "level self._weighted = weighted self.max_level = max_level self.center = center self.parent = parent", "child_prototypes = [center] child_affinities = None child = Node(self.level + 1, self.max_level, child_prototypes,", "children = [] for cluster_label, center in zip(range(n_clusters), cluster_centers): indices = np.where(labels==cluster_label)[0] child_prototypes", "_get_next_node_id def get_original_time_series_ids(self): def _get_original_time_series_ids(): return self.original_time_series_ids return _get_original_time_series_ids # def save_graph(self): #", "affinities is not None else None print(\"affinities shape = {0}\".format(shape)) print(\"\") self.n_query_subsequences =", "q_vector = np.array([node.q for node in self.node_shortcuts]) q_norm = np.linalg.norm(q_vector) 
self._query_vector = q_vector", "n_nodes return _get_next_node_id def get_original_time_series_ids(self): def _get_original_time_series_ids(): return self.original_time_series_ids return _get_original_time_series_ids # def", "self.generate_graph() # self.graph.write_png('graph.png') # # def generate_graph(self): # self.root.add_to_graph(None, self.graph) def _build_tree(self, affinities,", "next_node_id_getter, cluster_centers, clustering_threshold): children = [] for center in cluster_centers: child_prototypes = [center]", "-np.sum(not_zero_query_vector*not_zero_d_dataframe.values, axis=1) #score = 2-2*score if timer is not None: timer.stop() timer.start() order", "parent._id if parent is not None else None print(\"-- NODE {0} --\".format(self._id)) print(\"parent", "self.n_nodes self.n_nodes += 1 return n_nodes return _get_next_node_id def get_original_time_series_ids(self): def _get_original_time_series_ids(): return", "clustering_threshold) @property def is_leaf(self): return self.children is None @property def inverted_file(self): if self._inverted_file", "= self.d_data_frame.loc[not_zero_ts_ids, not_zero_node_ids] if timer is not None: timer.stop() timer.start() score = -np.sum(not_zero_query_vector*not_zero_d_dataframe.values,", "ts.run_sliding_window(self.time_window, self.time_step): #print(subsequence) self._add_subsequence(subsequence) print(\"{0} time series added\".format(i)) def _build_node_shorcuts(self, just_leaves=False): shortcut_dict =", "w @property def m_vector(self): m = np.zeros(self.n_original_time_series_in_tree) ids = self.get_original_time_series_ids_in_tree() for key, value", "1 if not self.is_leaf: distances = [time_series_twed(subsequence, node.center) for node in self.children] nearest_child", "zip(range(n_clusters), cluster_centers): indices = np.where(labels==cluster_label)[0] child_prototypes = prototypes[indices] child_affinities = affinities[indices][:, indices] child", "self.db_subsequences_dict return _get_db_subsequences_dict 
def get_next_node_id(self): def _get_next_node_id(): n_nodes = self.n_nodes self.n_nodes += 1", "self._query_vector is None: q_vector = np.array([node.q for node in self.node_shortcuts]) q_norm = np.linalg.norm(q_vector)", "in self.inverted_file.items(): index = ids.index(key) m[index] = value return m @property def q(self):", "node.is_leaf and node.n_query_subsequences > 0: yield node.inverted_file.keys() def get_next_subsequence_id(self): id_ = self.next_subsequence_id self.next_subsequence_id", "self.d_data_frame = None self._original_time_series_ids = None self._query_vector = None self.n_nodes = 0 self._weighted", "subsequence in subsequences: self.root.add_query_subsequence(subsequence) if timer is not None: timer.stop() timer.start() not_zero_node_ids =", "if not self.is_leaf: for child in self.children: child.add_shortcut_to_dict(shortcut_dict) @staticmethod def run_affinity_propagation(affinities): smin =", "inverted_file = Counter() for child in self.children: inverted_file += child.inverted_file self._inverted_file = inverted_file", "print('time window') print(self.time_window) print('time step') print(self.time_step) print(type(db_time_series)) print(db_time_series) for i, ts in enumerate(db_time_series):", "self.node_shortcuts: node.n_query_subsequences = 0 if timer is not None: timer.stop() timer.start() self._query_vector =", "in candidate_preferences: ap.preference = preference ap.fit(affinities) indices = ap.cluster_centers_indices_ if indices is not", "None or n_clusters == 1: cluster_centers = prototypes self._generate_children_border_case(next_node_id_getter, cluster_centers, clustering_threshold) return cluster_centers", "= AffinityPropagation(affinity='precomputed') for preference in candidate_preferences: ap.preference = preference ap.fit(affinities) indices = ap.cluster_centers_indices_", "= not_zero_d_dataframe.index.values[order] if timer is not None: timer.stop() return result def get_db_subsequences_dict(self): def", "= 
ap.cluster_centers_indices_ n_clusters = len(ap.cluster_centers_indices_) if indices is not None else None print(\"n_clusters", "+ 1 == max_level or len(prototypes) <= clustering_threshold: self._generate_inverted_file() else: self._generate_children(affinities, next_node_id_getter, prototypes,", "np.inf] = 0 self.d_data_frame = pd.DataFrame(np.nan_to_num(d_matrix), index=self.original_time_series_ids) def _add_subsequence(self, subsequence): self.root.add_db_subsequence(subsequence) def calculate_inverted_files(self):", "is not None: # graph.add_edge(pydot.Edge(parent_graph_node, # graph_node)) # if self.children is not None:", "= np.where(labels==cluster_label)[0] child_prototypes = prototypes[indices] child_affinities = affinities[indices][:, indices] child = Node(self.level +", "for node in self.node_shortcuts: if node.is_leaf and node.n_query_subsequences > 0: yield node.inverted_file.keys() def", "window') print(self.time_window) print('time step') print(self.time_step) print(type(db_time_series)) print(db_time_series) for i, ts in enumerate(db_time_series): print(ts)", "_build_node_shorcuts(self, just_leaves=False): shortcut_dict = {} self.root.add_shortcut_to_dict(shortcut_dict) shortcut_list = [v for v in shortcut_dict.values()", "distances = [time_series_twed(subsequence, node.center) for node in self.children] nearest_child = self.children[np.argmin(distances)] nearest_child.add_db_subsequence(subsequence) def", "added\".format(i)) def _build_node_shorcuts(self, just_leaves=False): shortcut_dict = {} self.root.add_shortcut_to_dict(shortcut_dict) shortcut_list = [v for v", "self._generate_children_border_case(next_node_id_getter, cluster_centers, clustering_threshold) return cluster_centers = prototypes[ap.cluster_centers_indices_] labels = ap.labels_ children = []", "self._build_tree(affinities, prototype_subsequences, clustering_threshold) self._populate_tree(db_time_series) self._build_node_shorcuts() self._build_weights_vector() 
self._build_d_data_frame() @property def n_subsequences(self): return len(self.db_subsequences_dict) @property", "and len(indices) > 1: break return ap def _generate_children(self, affinities, next_node_id_getter, prototypes, clustering_threshold):", "import time_series_twed import pandas as pd class SubsequenceTree: def __init__(self, max_level, prototype_subsequences_list, affinities,", "node.center) for node in self.children] nearest_child = self.children[np.argmin(distances)] nearest_child.add_query_subsequence(subsequence) def add_db_subsequence(self, subsequence): if", "ap = self.run_affinity_propagation(affinities) indices = ap.cluster_centers_indices_ n_clusters = len(ap.cluster_centers_indices_) if indices is not", "def generate_graph(self): # self.root.add_to_graph(None, self.graph) def _build_tree(self, affinities, prototypes, clustering_threshold): self.root = Node(0,", "None else None print(\"affinities shape = {0}\".format(shape)) print(\"\") self.n_query_subsequences = 0 self.children =", "timer.start() not_zero_node_ids = np.where(self.query_vector != 0)[0] not_zero_query_vector = self.query_vector[not_zero_node_ids] not_zero_ts_ids = self._queried_time_series_ids not_zero_d_dataframe", "{0}\".format(len(prototypes))) shape = affinities.shape if affinities is not None else None print(\"affinities shape", "cluster_centers: child_prototypes = [center] child_affinities = None child = Node(self.level + 1, self.max_level,", "indices is not None else None print(\"n_clusters = {0}\".format(n_clusters)) if n_clusters is None", "@property def m_vector(self): m = np.zeros(self.n_original_time_series_in_tree) ids = self.get_original_time_series_ids_in_tree() for key, value in", "q_norm = np.linalg.norm(q_vector) self._query_vector = q_vector / q_norm return self._query_vector @property def _queried_time_series_ids(self):", "not None: timer.stop() timer.start() score = -np.sum(not_zero_query_vector*not_zero_d_dataframe.values, axis=1) #score = 2-2*score if 
timer", "_get_db_subsequences_dict(): return self.db_subsequences_dict return _get_db_subsequences_dict def get_next_node_id(self): def _get_next_node_id(): n_nodes = self.n_nodes self.n_nodes", "self.node_shortcuts]) q_norm = np.linalg.norm(q_vector) self._query_vector = q_vector / q_norm return self._query_vector @property def", "= {0}\".format(parent_id)) print(\"level {0}\".format(level)) print(\"prototypes length = {0}\".format(len(prototypes))) shape = affinities.shape if affinities", "prototype_subsequences, clustering_threshold) self._populate_tree(db_time_series) self._build_node_shorcuts() self._build_weights_vector() self._build_d_data_frame() @property def n_subsequences(self): return len(self.db_subsequences_dict) @property def", "print(\"populating tree\") print('time window') print(self.time_window) print('time step') print(self.time_step) print(type(db_time_series)) print(db_time_series) for i, ts", "cluster_centers, clustering_threshold): children = [] for center in cluster_centers: child_prototypes = [center] child_affinities", "= level self._weighted = weighted self.max_level = max_level self.center = center self.parent =", "is None: inverted_file = Counter() for child in self.children: inverted_file += child.inverted_file self._inverted_file", "affinities, prototypes, clustering_threshold): self.root = Node(0, self.max_level, prototypes, affinities, None, None, self.get_next_node_id(), self.get_original_time_series_ids(),", "not_zero_node_ids] if timer is not None: timer.stop() timer.start() score = -np.sum(not_zero_query_vector*not_zero_d_dataframe.values, axis=1) #score", "parent self.get_original_time_series_ids_in_tree = original_time_series_ids_getter self._id = next_node_id_getter() parent_id = parent._id if parent is", "None or clustering_threshold <= 1: clustering_threshold = 1 if level + 1 ==", "next_node_id_getter, self.get_original_time_series_ids_in_tree, clustering_threshold) children.append(child) self.children = children def 
_generate_children_border_case(self, next_node_id_getter, cluster_centers, clustering_threshold): children", "_generate_children_border_case(self, next_node_id_getter, cluster_centers, clustering_threshold): children = [] for center in cluster_centers: child_prototypes =", "= Counter(original_time_series_id) self._inverted_file = Counter() # def add_to_graph(self, parent_graph_node, graph): # graph_node =", "in self.children] nearest_child = self.children[np.argmin(distances)] nearest_child.add_query_subsequence(subsequence) def add_db_subsequence(self, subsequence): if self.is_leaf: counter =", "children.append(child) self.children = children def _generate_children_border_case(self, next_node_id_getter, cluster_centers, clustering_threshold): children = [] for", "if self._original_time_series_ids is None: self._original_time_series_ids = list(self.root.inverted_file) return self._original_time_series_ids @property def n_original_time_series(self): return", "not None: timer.stop() return result def get_db_subsequences_dict(self): def _get_db_subsequences_dict(): return self.db_subsequences_dict return _get_db_subsequences_dict", "n_original_time_series_in_tree(self): return len(self.get_original_time_series_ids_in_tree()) @property def weight(self): w = 0 if self.n_original_time_series_in_node != 0:", "in self.children: inverted_file += child.inverted_file self._inverted_file = inverted_file return self._inverted_file @property def n_original_time_series_in_node(self):", "if self.is_leaf: counter = Counter({subsequence.original_id: 1}) self._inverted_file += counter else: distances = [time_series_twed(subsequence,", "self.children] nearest_child = self.children[np.argmin(distances)] nearest_child.add_query_subsequence(subsequence) def add_db_subsequence(self, subsequence): if self.is_leaf: counter = Counter({subsequence.original_id:", "self.n_nodes = 0 self._weighted = weighted prototype_subsequences = np.array(prototype_subsequences_list) 
self._build_tree(affinities, prototype_subsequences, clustering_threshold) self._populate_tree(db_time_series)", "== np.inf] = 0 self.d_data_frame = pd.DataFrame(np.nan_to_num(d_matrix), index=self.original_time_series_ids) def _add_subsequence(self, subsequence): self.root.add_db_subsequence(subsequence) def", "self.root.add_db_subsequence(subsequence) def calculate_inverted_files(self): return self.root.inverted_file class Node: def __init__(self, level, max_level, prototypes, affinities,", "self.root.add_query_subsequence(subsequence) if timer is not None: timer.stop() timer.start() not_zero_node_ids = np.where(self.query_vector != 0)[0]", "in self.node_shortcuts: if node.is_leaf and node.n_query_subsequences > 0: yield node.inverted_file.keys() def get_next_subsequence_id(self): id_", "parent, next_node_id_getter, original_time_series_ids_getter, clustering_threshold, weighted=True): self.level = level self._weighted = weighted self.max_level =", "!= 0: w = np.log(self.n_original_time_series_in_tree/ self.n_original_time_series_in_node) try: if not self._weighted: w = 1", "is not None else None print(\"n_clusters = {0}\".format(n_clusters)) if n_clusters is None or", "is None or n_clusters == 1: cluster_centers = prototypes self._generate_children_border_case(next_node_id_getter, cluster_centers, clustering_threshold) return", "query_vector(self): if self._query_vector is None: q_vector = np.array([node.q for node in self.node_shortcuts]) q_norm", "np.array(prototype_subsequences_list) self._build_tree(affinities, prototype_subsequences, clustering_threshold) self._populate_tree(db_time_series) self._build_node_shorcuts() self._build_weights_vector() self._build_d_data_frame() @property def n_subsequences(self): return len(self.db_subsequences_dict)", "(d_matrix.T / d_norm).T d_matrix[d_matrix == np.inf] = 0 self.d_data_frame = pd.DataFrame(np.nan_to_num(d_matrix), index=self.original_time_series_ids) def", "return None return 
self.n_query_subsequences*self.weight @property def d_vector(self): return self.weight*self.m_vector def add_shortcut_to_dict(self, shortcut_dict): shortcut_dict[self._id]", "self.children = children def add_query_subsequence(self, subsequence): self.n_query_subsequences += 1 if not self.is_leaf: distances", "self.get_original_time_series_ids(), clustering_threshold, weighted=self._weighted) def _populate_tree(self, db_time_series): print(\"populating tree\") print('time window') print(self.time_window) print('time step')", "def m_vector(self): m = np.zeros(self.n_original_time_series_in_tree) ids = self.get_original_time_series_ids_in_tree() for key, value in self.inverted_file.items():", "subsequences = time_series.run_sliding_window(self.time_window, self.time_step) if timer is not None: timer.stop() timer.start() for node", "1 except AttributeError: print(\"Attribute Error caught\") print(\"weight = {0}\".format(w)) return w @property def", "None, self.get_next_node_id(), self.get_original_time_series_ids(), clustering_threshold, weighted=self._weighted) def _populate_tree(self, db_time_series): print(\"populating tree\") print('time window') print(self.time_window)", "weighted=self._weighted) def _populate_tree(self, db_time_series): print(\"populating tree\") print('time window') print(self.time_window) print('time step') print(self.time_step) print(type(db_time_series))", "graph.add_edge(pydot.Edge(parent_graph_node, # graph_node)) # if self.children is not None: # for child in", "if self.children is not None: # for child in self.children: # child.add_to_graph(graph_node, graph)", "is None: q_vector = np.array([node.q for node in self.node_shortcuts]) q_norm = np.linalg.norm(q_vector) self._query_vector", "self._generate_children(affinities, next_node_id_getter, prototypes, clustering_threshold) @property def is_leaf(self): return self.children is None @property def", "np.array(weights_list) def _build_d_data_frame(self, just_leaves=False): d_list = 
[node.d_vector for node in self.node_shortcuts] d_matrix =", "shape = {0}\".format(shape)) print(\"\") self.n_query_subsequences = 0 self.children = None self._inverted_file = None", "clustering_threshold is None or clustering_threshold <= 1: clustering_threshold = 1 if level +", "print(\"weight = {0}\".format(w)) return w @property def m_vector(self): m = np.zeros(self.n_original_time_series_in_tree) ids =", "AttributeError: print(\"Attribute Error caught\") print(\"weight = {0}\".format(w)) return w @property def m_vector(self): m", "[node.d_vector for node in self.node_shortcuts] d_matrix = np.column_stack(d_list) d_norm = np.linalg.norm(d_matrix, axis=1) d_matrix", "as pydot from collections import Counter from distance_utils import time_series_twed import pandas as", "for v in shortcut_dict.values() if not just_leaves or v.is_leaf] self.node_shortcuts = shortcut_list def", "= self.get_original_time_series_ids_in_tree() for key, value in self.inverted_file.items(): index = ids.index(key) m[index] = value", "= children def add_query_subsequence(self, subsequence): self.n_query_subsequences += 1 if not self.is_leaf: distances =", "weighted=True): self.level = level self._weighted = weighted self.max_level = max_level self.center = center", "for node in self.node_shortcuts] d_matrix = np.column_stack(d_list) d_norm = np.linalg.norm(d_matrix, axis=1) d_matrix =", "nearest_child.add_db_subsequence(subsequence) def _generate_inverted_file(self): # original_time_series_id = (subsequence.original_id # for subsequence in prototypes) #", "not_zero_d_dataframe.index.values[order] if timer is not None: timer.stop() return result def get_db_subsequences_dict(self): def _get_db_subsequences_dict():", "_get_next_node_id(): n_nodes = self.n_nodes self.n_nodes += 1 return n_nodes return _get_next_node_id def get_original_time_series_ids(self):", "def n_original_time_series_in_node(self): return len(self.inverted_file) @property def n_original_time_series_in_tree(self): return 
len(self.get_original_time_series_ids_in_tree()) @property def weight(self): w", "self.is_leaf: for child in self.children: child.add_shortcut_to_dict(shortcut_dict) @staticmethod def run_affinity_propagation(affinities): smin = np.min(affinities) smax", "# self.graph.write_png('graph.png') # # def generate_graph(self): # self.root.add_to_graph(None, self.graph) def _build_tree(self, affinities, prototypes,", "= ap.cluster_centers_indices_ if indices is not None and len(indices) > 1: break return", "sklearn.cluster import AffinityPropagation #import pydotplus as pydot from collections import Counter from distance_utils", "else None print(\"-- NODE {0} --\".format(self._id)) print(\"parent = {0}\".format(parent_id)) print(\"level {0}\".format(level)) print(\"prototypes length", "print(\"level {0}\".format(level)) print(\"prototypes length = {0}\".format(len(prototypes))) shape = affinities.shape if affinities is not", "return _get_original_time_series_ids # def save_graph(self): # self.generate_graph() # self.graph.write_png('graph.png') # # def generate_graph(self):", "return cluster_centers = prototypes[ap.cluster_centers_indices_] labels = ap.labels_ children = [] for cluster_label, center", "timer is not None: timer.stop() timer.start() for node in self.node_shortcuts: node.n_query_subsequences = 0", "# # def generate_graph(self): # self.root.add_to_graph(None, self.graph) def _build_tree(self, affinities, prototypes, clustering_threshold): self.root", "ts in enumerate(db_time_series): print(ts) for subsequence in ts.run_sliding_window(self.time_window, self.time_step): #print(subsequence) self._add_subsequence(subsequence) print(\"{0} time", "= 0 if timer is not None: timer.stop() timer.start() self._query_vector = None for", "= None self._inverted_file = None if clustering_threshold is None or clustering_threshold <= 1:", "= [] for center in cluster_centers: child_prototypes = [center] child_affinities = None child", "_queried_time_series_ids_iterator(self): 
for node in self.node_shortcuts: if node.is_leaf and node.n_query_subsequences > 0: yield node.inverted_file.keys()", "def _build_node_shorcuts(self, just_leaves=False): shortcut_dict = {} self.root.add_shortcut_to_dict(shortcut_dict) shortcut_list = [v for v in", "in self.children] nearest_child = self.children[np.argmin(distances)] nearest_child.add_db_subsequence(subsequence) def _generate_inverted_file(self): # original_time_series_id = (subsequence.original_id #", "= [center] child_affinities = None child = Node(self.level + 1, self.max_level, child_prototypes, child_affinities,", "!= 0)[0] not_zero_query_vector = self.query_vector[not_zero_node_ids] not_zero_ts_ids = self._queried_time_series_ids not_zero_d_dataframe = self.d_data_frame.loc[not_zero_ts_ids, not_zero_node_ids] if", "None: timer.start() subsequences = time_series.run_sliding_window(self.time_window, self.time_step) if timer is not None: timer.stop() timer.start()", "indices = np.where(labels==cluster_label)[0] child_prototypes = prototypes[indices] child_affinities = affinities[indices][:, indices] child = Node(self.level", "self.n_query_subsequences += 1 if not self.is_leaf: distances = [time_series_twed(subsequence, node.center) for node in", "def inverted_file(self): if self._inverted_file is None: inverted_file = Counter() for child in self.children:", "value return m @property def q(self): if self.n_query_subsequences is None: return None return", "@property def weight(self): w = 0 if self.n_original_time_series_in_node != 0: w = np.log(self.n_original_time_series_in_tree/", "ap.cluster_centers_indices_ if indices is not None and len(indices) > 1: break return ap", "None self._query_vector = None self.n_nodes = 0 self._weighted = weighted prototype_subsequences = np.array(prototype_subsequences_list)", "None print(\"n_clusters = {0}\".format(n_clusters)) if n_clusters is None or n_clusters == 1: cluster_centers", "= ap.labels_ children = [] for cluster_label, center in 
zip(range(n_clusters), cluster_centers): indices =", "return len(self.db_subsequences_dict) @property def original_time_series_ids(self): if self._original_time_series_ids is None: self._original_time_series_ids = list(self.root.inverted_file) return", "ids.index(key) m[index] = value return m @property def q(self): if self.n_query_subsequences is None:", "parent_graph_node is not None: # graph.add_edge(pydot.Edge(parent_graph_node, # graph_node)) # if self.children is not", "if not self.is_leaf: distances = [time_series_twed(subsequence, node.center) for node in self.children] nearest_child =", "Counter() for child in self.children: inverted_file += child.inverted_file self._inverted_file = inverted_file return self._inverted_file", "= np.log(self.n_original_time_series_in_tree/ self.n_original_time_series_in_node) try: if not self._weighted: w = 1 except AttributeError: print(\"Attribute", "node in self.node_shortcuts: node.n_query_subsequences = 0 if timer is not None: timer.stop() timer.start()", "affinities, db_time_series, time_window, time_step, clustering_threshold=1, weighted=True): self.time_window = time_window self.time_step = time_step self.max_level", "self._original_time_series_ids @property def n_original_time_series(self): return len(self.original_time_series_ids) @property def query_vector(self): if self._query_vector is None:", "timer.start() score = -np.sum(not_zero_query_vector*not_zero_d_dataframe.values, axis=1) #score = 2-2*score if timer is not None:", "for node in self.node_shortcuts]) q_norm = np.linalg.norm(q_vector) self._query_vector = q_vector / q_norm return", "Node(self.level + 1, self.max_level, child_prototypes, child_affinities, center, self, next_node_id_getter, self.get_original_time_series_ids_in_tree, clustering_threshold) children.append(child) self.children", "__init__(self, max_level, prototype_subsequences_list, affinities, db_time_series, time_window, time_step, clustering_threshold=1, weighted=True): self.time_window = 
time_window self.time_step", "# self.root.add_to_graph(None, self.graph) def _build_tree(self, affinities, prototypes, clustering_threshold): self.root = Node(0, self.max_level, prototypes,", "None: timer.stop() timer.start() score = -np.sum(not_zero_query_vector*not_zero_d_dataframe.values, axis=1) #score = 2-2*score if timer is", "Counter() # def add_to_graph(self, parent_graph_node, graph): # graph_node = pydot.Node(str(self)) # graph.add_node(graph_node) #", "just_leaves or v.is_leaf] self.node_shortcuts = shortcut_list def _build_weights_vector(self): weights_list = [node.weight for node", "key, value in self.inverted_file.items(): index = ids.index(key) m[index] = value return m @property", "parent_graph_node, graph): # graph_node = pydot.Node(str(self)) # graph.add_node(graph_node) # if parent_graph_node is not", "affinities, None, None, self.get_next_node_id(), self.get_original_time_series_ids(), clustering_threshold, weighted=self._weighted) def _populate_tree(self, db_time_series): print(\"populating tree\") print('time", "for cluster_label, center in zip(range(n_clusters), cluster_centers): indices = np.where(labels==cluster_label)[0] child_prototypes = prototypes[indices] child_affinities", "self.time_step): #print(subsequence) self._add_subsequence(subsequence) print(\"{0} time series added\".format(i)) def _build_node_shorcuts(self, just_leaves=False): shortcut_dict = {}", "center, parent, next_node_id_getter, original_time_series_ids_getter, clustering_threshold, weighted=True): self.level = level self._weighted = weighted self.max_level", "self._build_d_data_frame() @property def n_subsequences(self): return len(self.db_subsequences_dict) @property def original_time_series_ids(self): if self._original_time_series_ids is None:", "def get_next_node_id(self): def _get_next_node_id(): n_nodes = self.n_nodes self.n_nodes += 1 return n_nodes return", "len(self.inverted_file) @property def n_original_time_series_in_tree(self): return 
len(self.get_original_time_series_ids_in_tree()) @property def weight(self): w = 0 if", "is None or clustering_threshold <= 1: clustering_threshold = 1 if level + 1", "None else None print(\"-- NODE {0} --\".format(self._id)) print(\"parent = {0}\".format(parent_id)) print(\"level {0}\".format(level)) print(\"prototypes", "@property def n_subsequences(self): return len(self.db_subsequences_dict) @property def original_time_series_ids(self): if self._original_time_series_ids is None: self._original_time_series_ids", "[center] child_affinities = None child = Node(self.level + 1, self.max_level, child_prototypes, child_affinities, center,", "NODE {0} --\".format(self._id)) print(\"parent = {0}\".format(parent_id)) print(\"level {0}\".format(level)) print(\"prototypes length = {0}\".format(len(prototypes))) shape", "print('time step') print(self.time_step) print(type(db_time_series)) print(db_time_series) for i, ts in enumerate(db_time_series): print(ts) for subsequence", "def d_vector(self): return self.weight*self.m_vector def add_shortcut_to_dict(self, shortcut_dict): shortcut_dict[self._id] = self if not self.is_leaf:", "= [] for cluster_label, center in zip(range(n_clusters), cluster_centers): indices = np.where(labels==cluster_label)[0] child_prototypes =", "m_vector(self): m = np.zeros(self.n_original_time_series_in_tree) ids = self.get_original_time_series_ids_in_tree() for key, value in self.inverted_file.items(): index", "_populate_tree(self, db_time_series): print(\"populating tree\") print('time window') print(self.time_window) print('time step') print(self.time_step) print(type(db_time_series)) print(db_time_series) for", "# original_time_series_id = (subsequence.original_id # for subsequence in prototypes) # self._inverted_file = Counter(original_time_series_id)", "not None else None print(\"-- NODE {0} --\".format(self._id)) print(\"parent = {0}\".format(parent_id)) print(\"level {0}\".format(level))", "child.add_shortcut_to_dict(shortcut_dict) 
@staticmethod def run_affinity_propagation(affinities): smin = np.min(affinities) smax = np.max(affinities) candidate_preferences = np.linspace(smin,", "= np.array(prototype_subsequences_list) self._build_tree(affinities, prototype_subsequences, clustering_threshold) self._populate_tree(db_time_series) self._build_node_shorcuts() self._build_weights_vector() self._build_d_data_frame() @property def n_subsequences(self): return", "next_node_id_getter, original_time_series_ids_getter, clustering_threshold, weighted=True): self.level = level self._weighted = weighted self.max_level = max_level", "return self._inverted_file @property def n_original_time_series_in_node(self): return len(self.inverted_file) @property def n_original_time_series_in_tree(self): return len(self.get_original_time_series_ids_in_tree()) @property", "timer.stop() return result def get_db_subsequences_dict(self): def _get_db_subsequences_dict(): return self.db_subsequences_dict return _get_db_subsequences_dict def get_next_node_id(self):", "cluster_centers, clustering_threshold) return cluster_centers = prototypes[ap.cluster_centers_indices_] labels = ap.labels_ children = [] for", "next_node_id_getter, prototypes, clustering_threshold): ap = self.run_affinity_propagation(affinities) indices = ap.cluster_centers_indices_ n_clusters = len(ap.cluster_centers_indices_) if", "self.is_leaf: counter = Counter({subsequence.original_id: 1}) self._inverted_file += counter else: distances = [time_series_twed(subsequence, node.center)", "is None @property def inverted_file(self): if self._inverted_file is None: inverted_file = Counter() for", "self.get_original_time_series_ids_in_tree() for key, value in self.inverted_file.items(): index = ids.index(key) m[index] = value return", "self.max_level = max_level #self.graph = pydot.Dot(graph_type='graph') self.query_ts = None self.query_score_chart = None self.node_shortcuts", "clustering_threshold) children.append(child) self.children = children def 
_generate_children_border_case(self, next_node_id_getter, cluster_centers, clustering_threshold): children = []", "timer is not None: timer.stop() timer.start() not_zero_node_ids = np.where(self.query_vector != 0)[0] not_zero_query_vector =", "self.time_window = time_window self.time_step = time_step self.max_level = max_level #self.graph = pydot.Dot(graph_type='graph') self.query_ts", "None child = Node(self.level + 1, self.max_level, child_prototypes, child_affinities, center, self, next_node_id_getter, self.get_original_time_series_ids_in_tree,", "v.is_leaf] self.node_shortcuts = shortcut_list def _build_weights_vector(self): weights_list = [node.weight for node in self.node_shortcuts]", "calculate_inverted_files(self): return self.root.inverted_file class Node: def __init__(self, level, max_level, prototypes, affinities, center, parent,", "else: self._generate_children(affinities, next_node_id_getter, prototypes, clustering_threshold) @property def is_leaf(self): return self.children is None @property", "self.n_query_subsequences*self.weight @property def d_vector(self): return self.weight*self.m_vector def add_shortcut_to_dict(self, shortcut_dict): shortcut_dict[self._id] = self if", "def __init__(self, max_level, prototype_subsequences_list, affinities, db_time_series, time_window, time_step, clustering_threshold=1, weighted=True): self.time_window = time_window", "center in zip(range(n_clusters), cluster_centers): indices = np.where(labels==cluster_label)[0] child_prototypes = prototypes[indices] child_affinities = affinities[indices][:,", "center, self, next_node_id_getter, self.get_original_time_series_ids_in_tree, clustering_threshold) children.append(child) self.children = children def add_query_subsequence(self, subsequence): self.n_query_subsequences", "for node in self.node_shortcuts] self.weights = np.array(weights_list) def _build_d_data_frame(self, just_leaves=False): d_list = [node.d_vector", "d_matrix = np.column_stack(d_list) d_norm = 
np.linalg.norm(d_matrix, axis=1) d_matrix = (d_matrix.T / d_norm).T d_matrix[d_matrix", "ap def _generate_children(self, affinities, next_node_id_getter, prototypes, clustering_threshold): ap = self.run_affinity_propagation(affinities) indices = ap.cluster_centers_indices_", "= parent._id if parent is not None else None print(\"-- NODE {0} --\".format(self._id))", "timer is not None: timer.start() subsequences = time_series.run_sliding_window(self.time_window, self.time_step) if timer is not", "not_zero_node_ids = np.where(self.query_vector != 0)[0] not_zero_query_vector = self.query_vector[not_zero_node_ids] not_zero_ts_ids = self._queried_time_series_ids not_zero_d_dataframe =", "1 return n_nodes return _get_next_node_id def get_original_time_series_ids(self): def _get_original_time_series_ids(): return self.original_time_series_ids return _get_original_time_series_ids", "for child in self.children: child.add_shortcut_to_dict(shortcut_dict) @staticmethod def run_affinity_propagation(affinities): smin = np.min(affinities) smax =", "len(self.db_subsequences_dict) @property def original_time_series_ids(self): if self._original_time_series_ids is None: self._original_time_series_ids = list(self.root.inverted_file) return self._original_time_series_ids", "= weighted self.max_level = max_level self.center = center self.parent = parent self.get_original_time_series_ids_in_tree =", "0: w = np.log(self.n_original_time_series_in_tree/ self.n_original_time_series_in_node) try: if not self._weighted: w = 1 except", "def add_shortcut_to_dict(self, shortcut_dict): shortcut_dict[self._id] = self if not self.is_leaf: for child in self.children:", "= time_window self.time_step = time_step self.max_level = max_level #self.graph = pydot.Dot(graph_type='graph') self.query_ts =", "self.next_subsequence_id += 1 return id_ def make_query(self, time_series, timer=None): if timer is not", "None: timer.stop() timer.start() for node in self.node_shortcuts: node.n_query_subsequences = 0 if 
timer is", "self.time_step = time_step self.max_level = max_level #self.graph = pydot.Dot(graph_type='graph') self.query_ts = None self.query_score_chart", "= None self.query_score_chart = None self.node_shortcuts = None self.weights = None self.d_data_frame =", "child in self.children: child.add_shortcut_to_dict(shortcut_dict) @staticmethod def run_affinity_propagation(affinities): smin = np.min(affinities) smax = np.max(affinities)", "def _generate_children(self, affinities, next_node_id_getter, prototypes, clustering_threshold): ap = self.run_affinity_propagation(affinities) indices = ap.cluster_centers_indices_ n_clusters", "timer.stop() timer.start() score = -np.sum(not_zero_query_vector*not_zero_d_dataframe.values, axis=1) #score = 2-2*score if timer is not", "= [v for v in shortcut_dict.values() if not just_leaves or v.is_leaf] self.node_shortcuts =", "q(self): if self.n_query_subsequences is None: return None return self.n_query_subsequences*self.weight @property def d_vector(self): return", "import AffinityPropagation #import pydotplus as pydot from collections import Counter from distance_utils import", "weight(self): w = 0 if self.n_original_time_series_in_node != 0: w = np.log(self.n_original_time_series_in_tree/ self.n_original_time_series_in_node) try:", "None self.weights = None self.d_data_frame = None self._original_time_series_ids = None self._query_vector = None", "@staticmethod def run_affinity_propagation(affinities): smin = np.min(affinities) smax = np.max(affinities) candidate_preferences = np.linspace(smin, smax,", "db_time_series): print(\"populating tree\") print('time window') print(self.time_window) print('time step') print(self.time_step) print(type(db_time_series)) print(db_time_series) for i,", "self._add_subsequence(subsequence) print(\"{0} time series added\".format(i)) def _build_node_shorcuts(self, just_leaves=False): shortcut_dict = {} self.root.add_shortcut_to_dict(shortcut_dict) shortcut_list", "ap = 
AffinityPropagation(affinity='precomputed') for preference in candidate_preferences: ap.preference = preference ap.fit(affinities) indices =", "is not None: timer.stop() timer.start() order = np.argsort(score) result = not_zero_d_dataframe.index.values[order] if timer", "timer.start() for node in self.node_shortcuts: node.n_query_subsequences = 0 if timer is not None:", "if timer is not None: timer.stop() timer.start() score = -np.sum(not_zero_query_vector*not_zero_d_dataframe.values, axis=1) #score =", "def _get_db_subsequences_dict(): return self.db_subsequences_dict return _get_db_subsequences_dict def get_next_node_id(self): def _get_next_node_id(): n_nodes = self.n_nodes", "= self.run_affinity_propagation(affinities) indices = ap.cluster_centers_indices_ n_clusters = len(ap.cluster_centers_indices_) if indices is not None", "timer.start() self._query_vector = None for subsequence in subsequences: self.root.add_query_subsequence(subsequence) if timer is not", "level + 1 == max_level or len(prototypes) <= clustering_threshold: self._generate_inverted_file() else: self._generate_children(affinities, next_node_id_getter,", "shortcut_dict): shortcut_dict[self._id] = self if not self.is_leaf: for child in self.children: child.add_shortcut_to_dict(shortcut_dict) @staticmethod", "indices] child = Node(self.level + 1, self.max_level, child_prototypes, child_affinities, center, self, next_node_id_getter, self.get_original_time_series_ids_in_tree,", "timer is not None: timer.stop() timer.start() self._query_vector = None for subsequence in subsequences:", "list(set().union(*self._queried_time_series_ids_iterator())) def prune(self): self._build_node_shorcuts(True) self._build_weights_vector() self._build_d_data_frame() def _queried_time_series_ids_iterator(self): for node in self.node_shortcuts: if", "@property def inverted_file(self): if self._inverted_file is None: inverted_file = Counter() for child in", "= self.next_subsequence_id self.next_subsequence_id += 1 return 
id_ def make_query(self, time_series, timer=None): if timer", "def get_original_time_series_ids(self): def _get_original_time_series_ids(): return self.original_time_series_ids return _get_original_time_series_ids # def save_graph(self): # self.generate_graph()", "clustering_threshold, weighted=True): self.level = level self._weighted = weighted self.max_level = max_level self.center =", "n_original_time_series_in_node(self): return len(self.inverted_file) @property def n_original_time_series_in_tree(self): return len(self.get_original_time_series_ids_in_tree()) @property def weight(self): w =", "self._populate_tree(db_time_series) self._build_node_shorcuts() self._build_weights_vector() self._build_d_data_frame() @property def n_subsequences(self): return len(self.db_subsequences_dict) @property def original_time_series_ids(self): if", "def get_db_subsequences_dict(self): def _get_db_subsequences_dict(): return self.db_subsequences_dict return _get_db_subsequences_dict def get_next_node_id(self): def _get_next_node_id(): n_nodes", "make_query(self, time_series, timer=None): if timer is not None: timer.start() subsequences = time_series.run_sliding_window(self.time_window, self.time_step)", "self.time_step) if timer is not None: timer.stop() timer.start() for node in self.node_shortcuts: node.n_query_subsequences", "= self if not self.is_leaf: for child in self.children: child.add_shortcut_to_dict(shortcut_dict) @staticmethod def run_affinity_propagation(affinities):", "np.linspace(smin, smax, 10) ap = AffinityPropagation(affinity='precomputed') for preference in candidate_preferences: ap.preference = preference", "= -np.sum(not_zero_query_vector*not_zero_d_dataframe.values, axis=1) #score = 2-2*score if timer is not None: timer.stop() timer.start()", "print(db_time_series) for i, ts in enumerate(db_time_series): print(ts) for subsequence in ts.run_sliding_window(self.time_window, self.time_step): #print(subsequence)", "clustering_threshold): children = [] for 
center in cluster_centers: child_prototypes = [center] child_affinities =", "np.linalg.norm(q_vector) self._query_vector = q_vector / q_norm return self._query_vector @property def _queried_time_series_ids(self): return list(set().union(*self._queried_time_series_ids_iterator()))", "self.max_level = max_level self.center = center self.parent = parent self.get_original_time_series_ids_in_tree = original_time_series_ids_getter self._id", "_get_original_time_series_ids # def save_graph(self): # self.generate_graph() # self.graph.write_png('graph.png') # # def generate_graph(self): #", "if level + 1 == max_level or len(prototypes) <= clustering_threshold: self._generate_inverted_file() else: self._generate_children(affinities,", "generate_graph(self): # self.root.add_to_graph(None, self.graph) def _build_tree(self, affinities, prototypes, clustering_threshold): self.root = Node(0, self.max_level,", "n_subsequences(self): return len(self.db_subsequences_dict) @property def original_time_series_ids(self): if self._original_time_series_ids is None: self._original_time_series_ids = list(self.root.inverted_file)", "child = Node(self.level + 1, self.max_level, child_prototypes, child_affinities, center, self, next_node_id_getter, self.get_original_time_series_ids_in_tree, clustering_threshold)", "from sklearn.cluster import AffinityPropagation #import pydotplus as pydot from collections import Counter from", "_get_original_time_series_ids(): return self.original_time_series_ids return _get_original_time_series_ids # def save_graph(self): # self.generate_graph() # self.graph.write_png('graph.png') #", "= pydot.Dot(graph_type='graph') self.query_ts = None self.query_score_chart = None self.node_shortcuts = None self.weights =", "graph): # graph_node = pydot.Node(str(self)) # graph.add_node(graph_node) # if parent_graph_node is not None:", "self._query_vector = None self.n_nodes = 0 self._weighted = weighted prototype_subsequences = np.array(prototype_subsequences_list) 
self._build_tree(affinities,", "just_leaves=False): d_list = [node.d_vector for node in self.node_shortcuts] d_matrix = np.column_stack(d_list) d_norm =", "is_leaf(self): return self.children is None @property def inverted_file(self): if self._inverted_file is None: inverted_file", "np.log(self.n_original_time_series_in_tree/ self.n_original_time_series_in_node) try: if not self._weighted: w = 1 except AttributeError: print(\"Attribute Error", "labels = ap.labels_ children = [] for cluster_label, center in zip(range(n_clusters), cluster_centers): indices", "shortcut_list def _build_weights_vector(self): weights_list = [node.weight for node in self.node_shortcuts] self.weights = np.array(weights_list)", "= np.argsort(score) result = not_zero_d_dataframe.index.values[order] if timer is not None: timer.stop() return result", "break return ap def _generate_children(self, affinities, next_node_id_getter, prototypes, clustering_threshold): ap = self.run_affinity_propagation(affinities) indices", "self._original_time_series_ids is None: self._original_time_series_ids = list(self.root.inverted_file) return self._original_time_series_ids @property def n_original_time_series(self): return len(self.original_time_series_ids)", "= max_level self.center = center self.parent = parent self.get_original_time_series_ids_in_tree = original_time_series_ids_getter self._id =", "for node in self.children] nearest_child = self.children[np.argmin(distances)] nearest_child.add_db_subsequence(subsequence) def _generate_inverted_file(self): # original_time_series_id =", "= {} self.root.add_shortcut_to_dict(shortcut_dict) shortcut_list = [v for v in shortcut_dict.values() if not just_leaves", "clustering_threshold) children.append(child) self.children = children def add_query_subsequence(self, subsequence): self.n_query_subsequences += 1 if not", "def _generate_children_border_case(self, next_node_id_getter, cluster_centers, clustering_threshold): children = [] for center in cluster_centers: 
child_prototypes", "print(\"prototypes length = {0}\".format(len(prototypes))) shape = affinities.shape if affinities is not None else", "self.weights = np.array(weights_list) def _build_d_data_frame(self, just_leaves=False): d_list = [node.d_vector for node in self.node_shortcuts]", "def _populate_tree(self, db_time_series): print(\"populating tree\") print('time window') print(self.time_window) print('time step') print(self.time_step) print(type(db_time_series)) print(db_time_series)", "indices = ap.cluster_centers_indices_ n_clusters = len(ap.cluster_centers_indices_) if indices is not None else None", "node.n_query_subsequences > 0: yield node.inverted_file.keys() def get_next_subsequence_id(self): id_ = self.next_subsequence_id self.next_subsequence_id += 1", "0 self.d_data_frame = pd.DataFrame(np.nan_to_num(d_matrix), index=self.original_time_series_ids) def _add_subsequence(self, subsequence): self.root.add_db_subsequence(subsequence) def calculate_inverted_files(self): return self.root.inverted_file", "self.inverted_file.items(): index = ids.index(key) m[index] = value return m @property def q(self): if", "np.max(affinities) candidate_preferences = np.linspace(smin, smax, 10) ap = AffinityPropagation(affinity='precomputed') for preference in candidate_preferences:", "= children def _generate_children_border_case(self, next_node_id_getter, cluster_centers, clustering_threshold): children = [] for center in", "def _queried_time_series_ids(self): return list(set().union(*self._queried_time_series_ids_iterator())) def prune(self): self._build_node_shorcuts(True) self._build_weights_vector() self._build_d_data_frame() def _queried_time_series_ids_iterator(self): for node", "self, next_node_id_getter, self.get_original_time_series_ids_in_tree, clustering_threshold) children.append(child) self.children = children def add_query_subsequence(self, subsequence): self.n_query_subsequences +=", "prototypes, clustering_threshold) @property def is_leaf(self): return 
self.children is None @property def inverted_file(self): if", "if indices is not None and len(indices) > 1: break return ap def", "_generate_inverted_file(self): # original_time_series_id = (subsequence.original_id # for subsequence in prototypes) # self._inverted_file =", "v in shortcut_dict.values() if not just_leaves or v.is_leaf] self.node_shortcuts = shortcut_list def _build_weights_vector(self):", "if timer is not None: timer.start() subsequences = time_series.run_sliding_window(self.time_window, self.time_step) if timer is", "time_series.run_sliding_window(self.time_window, self.time_step) if timer is not None: timer.stop() timer.start() for node in self.node_shortcuts:", "or len(prototypes) <= clustering_threshold: self._generate_inverted_file() else: self._generate_children(affinities, next_node_id_getter, prototypes, clustering_threshold) @property def is_leaf(self):", "self.n_query_subsequences = 0 self.children = None self._inverted_file = None if clustering_threshold is None", "= None self._query_vector = None self.n_nodes = 0 self._weighted = weighted prototype_subsequences =", "= prototypes[indices] child_affinities = affinities[indices][:, indices] child = Node(self.level + 1, self.max_level, child_prototypes,", "node in self.children] nearest_child = self.children[np.argmin(distances)] nearest_child.add_db_subsequence(subsequence) def _generate_inverted_file(self): # original_time_series_id = (subsequence.original_id", "[time_series_twed(subsequence, node.center) for node in self.children] nearest_child = self.children[np.argmin(distances)] nearest_child.add_query_subsequence(subsequence) def add_db_subsequence(self, subsequence):", "return self.db_subsequences_dict return _get_db_subsequences_dict def get_next_node_id(self): def _get_next_node_id(): n_nodes = self.n_nodes self.n_nodes +=", "child in self.children: inverted_file += child.inverted_file self._inverted_file = inverted_file return self._inverted_file @property def", "not None: 
timer.stop() timer.start() order = np.argsort(score) result = not_zero_d_dataframe.index.values[order] if timer is", "max_level or len(prototypes) <= clustering_threshold: self._generate_inverted_file() else: self._generate_children(affinities, next_node_id_getter, prototypes, clustering_threshold) @property def", "len(self.original_time_series_ids) @property def query_vector(self): if self._query_vector is None: q_vector = np.array([node.q for node", "child_prototypes = prototypes[indices] child_affinities = affinities[indices][:, indices] child = Node(self.level + 1, self.max_level,", "= None self._original_time_series_ids = None self._query_vector = None self.n_nodes = 0 self._weighted =", "def save_graph(self): # self.generate_graph() # self.graph.write_png('graph.png') # # def generate_graph(self): # self.root.add_to_graph(None, self.graph)", "step') print(self.time_step) print(type(db_time_series)) print(db_time_series) for i, ts in enumerate(db_time_series): print(ts) for subsequence in", "clustering_threshold, weighted=self._weighted) def _populate_tree(self, db_time_series): print(\"populating tree\") print('time window') print(self.time_window) print('time step') print(self.time_step)", "prototype_subsequences = np.array(prototype_subsequences_list) self._build_tree(affinities, prototype_subsequences, clustering_threshold) self._populate_tree(db_time_series) self._build_node_shorcuts() self._build_weights_vector() self._build_d_data_frame() @property def n_subsequences(self):", "None print(\"-- NODE {0} --\".format(self._id)) print(\"parent = {0}\".format(parent_id)) print(\"level {0}\".format(level)) print(\"prototypes length =", "= np.zeros(self.n_original_time_series_in_tree) ids = self.get_original_time_series_ids_in_tree() for key, value in self.inverted_file.items(): index = ids.index(key)", "None: return None return self.n_query_subsequences*self.weight @property def d_vector(self): return self.weight*self.m_vector def add_shortcut_to_dict(self, 
shortcut_dict):", "original_time_series_id = (subsequence.original_id # for subsequence in prototypes) # self._inverted_file = Counter(original_time_series_id) self._inverted_file", "clustering_threshold=1, weighted=True): self.time_window = time_window self.time_step = time_step self.max_level = max_level #self.graph =", "def get_next_subsequence_id(self): id_ = self.next_subsequence_id self.next_subsequence_id += 1 return id_ def make_query(self, time_series,", "# self.generate_graph() # self.graph.write_png('graph.png') # # def generate_graph(self): # self.root.add_to_graph(None, self.graph) def _build_tree(self,", "= 0 self.d_data_frame = pd.DataFrame(np.nan_to_num(d_matrix), index=self.original_time_series_ids) def _add_subsequence(self, subsequence): self.root.add_db_subsequence(subsequence) def calculate_inverted_files(self): return", "not just_leaves or v.is_leaf] self.node_shortcuts = shortcut_list def _build_weights_vector(self): weights_list = [node.weight for", "pd class SubsequenceTree: def __init__(self, max_level, prototype_subsequences_list, affinities, db_time_series, time_window, time_step, clustering_threshold=1, weighted=True):", "time series added\".format(i)) def _build_node_shorcuts(self, just_leaves=False): shortcut_dict = {} self.root.add_shortcut_to_dict(shortcut_dict) shortcut_list = [v", "next_node_id_getter, prototypes, clustering_threshold) @property def is_leaf(self): return self.children is None @property def inverted_file(self):", "self._generate_inverted_file() else: self._generate_children(affinities, next_node_id_getter, prototypes, clustering_threshold) @property def is_leaf(self): return self.children is None", "child_prototypes, child_affinities, center, self, next_node_id_getter, self.get_original_time_series_ids_in_tree, clustering_threshold) children.append(child) self.children = children def _generate_children_border_case(self,", "else None print(\"n_clusters = {0}\".format(n_clusters)) if n_clusters is None or n_clusters 
== 1:", "[] for cluster_label, center in zip(range(n_clusters), cluster_centers): indices = np.where(labels==cluster_label)[0] child_prototypes = prototypes[indices]", "= 2-2*score if timer is not None: timer.stop() timer.start() order = np.argsort(score) result", "Counter({subsequence.original_id: 1}) self._inverted_file += counter else: distances = [time_series_twed(subsequence, node.center) for node in", "None else None print(\"n_clusters = {0}\".format(n_clusters)) if n_clusters is None or n_clusters ==", "{0}\".format(w)) return w @property def m_vector(self): m = np.zeros(self.n_original_time_series_in_tree) ids = self.get_original_time_series_ids_in_tree() for", "in enumerate(db_time_series): print(ts) for subsequence in ts.run_sliding_window(self.time_window, self.time_step): #print(subsequence) self._add_subsequence(subsequence) print(\"{0} time series", "np.zeros(self.n_original_time_series_in_tree) ids = self.get_original_time_series_ids_in_tree() for key, value in self.inverted_file.items(): index = ids.index(key) m[index]", "return len(self.original_time_series_ids) @property def query_vector(self): if self._query_vector is None: q_vector = np.array([node.q for", "self.n_nodes += 1 return n_nodes return _get_next_node_id def get_original_time_series_ids(self): def _get_original_time_series_ids(): return self.original_time_series_ids", "= None self.weights = None self.d_data_frame = None self._original_time_series_ids = None self._query_vector =", "+= 1 return n_nodes return _get_next_node_id def get_original_time_series_ids(self): def _get_original_time_series_ids(): return self.original_time_series_ids return", "= affinities.shape if affinities is not None else None print(\"affinities shape = {0}\".format(shape))", "{0}\".format(level)) print(\"prototypes length = {0}\".format(len(prototypes))) shape = affinities.shape if affinities is not None", "timer.start() subsequences = time_series.run_sliding_window(self.time_window, self.time_step) if timer is 
not None: timer.stop() timer.start() for", "self.root.add_to_graph(None, self.graph) def _build_tree(self, affinities, prototypes, clustering_threshold): self.root = Node(0, self.max_level, prototypes, affinities,", "(subsequence.original_id # for subsequence in prototypes) # self._inverted_file = Counter(original_time_series_id) self._inverted_file = Counter()", "graph_node)) # if self.children is not None: # for child in self.children: #", "as pd class SubsequenceTree: def __init__(self, max_level, prototype_subsequences_list, affinities, db_time_series, time_window, time_step, clustering_threshold=1,", "not_zero_query_vector = self.query_vector[not_zero_node_ids] not_zero_ts_ids = self._queried_time_series_ids not_zero_d_dataframe = self.d_data_frame.loc[not_zero_ts_ids, not_zero_node_ids] if timer is", "parent is not None else None print(\"-- NODE {0} --\".format(self._id)) print(\"parent = {0}\".format(parent_id))", "is not None: timer.stop() return result def get_db_subsequences_dict(self): def _get_db_subsequences_dict(): return self.db_subsequences_dict return", "print(\"-- NODE {0} --\".format(self._id)) print(\"parent = {0}\".format(parent_id)) print(\"level {0}\".format(level)) print(\"prototypes length = {0}\".format(len(prototypes)))", "def _build_d_data_frame(self, just_leaves=False): d_list = [node.d_vector for node in self.node_shortcuts] d_matrix = np.column_stack(d_list)", "not None: # graph.add_edge(pydot.Edge(parent_graph_node, # graph_node)) # if self.children is not None: #", "len(ap.cluster_centers_indices_) if indices is not None else None print(\"n_clusters = {0}\".format(n_clusters)) if n_clusters", "@property def _queried_time_series_ids(self): return list(set().union(*self._queried_time_series_ids_iterator())) def prune(self): self._build_node_shorcuts(True) self._build_weights_vector() self._build_d_data_frame() def _queried_time_series_ids_iterator(self): for", "self.is_leaf: distances = [time_series_twed(subsequence, node.center) for 
node in self.children] nearest_child = self.children[np.argmin(distances)] nearest_child.add_query_subsequence(subsequence)", "timer.start() order = np.argsort(score) result = not_zero_d_dataframe.index.values[order] if timer is not None: timer.stop()", "d_vector(self): return self.weight*self.m_vector def add_shortcut_to_dict(self, shortcut_dict): shortcut_dict[self._id] = self if not self.is_leaf: for", "timer.stop() timer.start() order = np.argsort(score) result = not_zero_d_dataframe.index.values[order] if timer is not None:", "self.center = center self.parent = parent self.get_original_time_series_ids_in_tree = original_time_series_ids_getter self._id = next_node_id_getter() parent_id", "print(\"parent = {0}\".format(parent_id)) print(\"level {0}\".format(level)) print(\"prototypes length = {0}\".format(len(prototypes))) shape = affinities.shape if", "np.where(labels==cluster_label)[0] child_prototypes = prototypes[indices] child_affinities = affinities[indices][:, indices] child = Node(self.level + 1,", "pydot from collections import Counter from distance_utils import time_series_twed import pandas as pd", "ap.fit(affinities) indices = ap.cluster_centers_indices_ if indices is not None and len(indices) > 1:", "import numpy as np from sklearn.cluster import AffinityPropagation #import pydotplus as pydot from", "add_shortcut_to_dict(self, shortcut_dict): shortcut_dict[self._id] = self if not self.is_leaf: for child in self.children: child.add_shortcut_to_dict(shortcut_dict)", "= None child = Node(self.level + 1, self.max_level, child_prototypes, child_affinities, center, self, next_node_id_getter,", "{0} --\".format(self._id)) print(\"parent = {0}\".format(parent_id)) print(\"level {0}\".format(level)) print(\"prototypes length = {0}\".format(len(prototypes))) shape =", "for i, ts in enumerate(db_time_series): print(ts) for subsequence in ts.run_sliding_window(self.time_window, self.time_step): #print(subsequence) self._add_subsequence(subsequence)", "@property 
def query_vector(self): if self._query_vector is None: q_vector = np.array([node.q for node in", "if self.n_original_time_series_in_node != 0: w = np.log(self.n_original_time_series_in_tree/ self.n_original_time_series_in_node) try: if not self._weighted: w", "_generate_children(self, affinities, next_node_id_getter, prototypes, clustering_threshold): ap = self.run_affinity_propagation(affinities) indices = ap.cluster_centers_indices_ n_clusters =", "in self.node_shortcuts: node.n_query_subsequences = 0 if timer is not None: timer.stop() timer.start() self._query_vector", "return ap def _generate_children(self, affinities, next_node_id_getter, prototypes, clustering_threshold): ap = self.run_affinity_propagation(affinities) indices =", "+ 1, self.max_level, child_prototypes, child_affinities, center, self, next_node_id_getter, self.get_original_time_series_ids_in_tree, clustering_threshold) children.append(child) self.children =", "clustering_threshold) self._populate_tree(db_time_series) self._build_node_shorcuts() self._build_weights_vector() self._build_d_data_frame() @property def n_subsequences(self): return len(self.db_subsequences_dict) @property def original_time_series_ids(self):", "= affinities[indices][:, indices] child = Node(self.level + 1, self.max_level, child_prototypes, child_affinities, center, self,", "= np.linalg.norm(d_matrix, axis=1) d_matrix = (d_matrix.T / d_norm).T d_matrix[d_matrix == np.inf] = 0", "add_db_subsequence(self, subsequence): if self.is_leaf: counter = Counter({subsequence.original_id: 1}) self._inverted_file += counter else: distances", "self.get_original_time_series_ids_in_tree = original_time_series_ids_getter self._id = next_node_id_getter() parent_id = parent._id if parent is not", "None: inverted_file = Counter() for child in self.children: inverted_file += child.inverted_file self._inverted_file =", "len(indices) > 1: break return ap def _generate_children(self, affinities, next_node_id_getter, prototypes, 
clustering_threshold): ap", "def original_time_series_ids(self): if self._original_time_series_ids is None: self._original_time_series_ids = list(self.root.inverted_file) return self._original_time_series_ids @property def", "in shortcut_dict.values() if not just_leaves or v.is_leaf] self.node_shortcuts = shortcut_list def _build_weights_vector(self): weights_list", "prototypes, affinities, None, None, self.get_next_node_id(), self.get_original_time_series_ids(), clustering_threshold, weighted=self._weighted) def _populate_tree(self, db_time_series): print(\"populating tree\")", "self._original_time_series_ids = None self._query_vector = None self.n_nodes = 0 self._weighted = weighted prototype_subsequences", "subsequence in ts.run_sliding_window(self.time_window, self.time_step): #print(subsequence) self._add_subsequence(subsequence) print(\"{0} time series added\".format(i)) def _build_node_shorcuts(self, just_leaves=False):", "_build_weights_vector(self): weights_list = [node.weight for node in self.node_shortcuts] self.weights = np.array(weights_list) def _build_d_data_frame(self,", "self.node_shortcuts] self.weights = np.array(weights_list) def _build_d_data_frame(self, just_leaves=False): d_list = [node.d_vector for node in", "return n_nodes return _get_next_node_id def get_original_time_series_ids(self): def _get_original_time_series_ids(): return self.original_time_series_ids return _get_original_time_series_ids #", "def _get_original_time_series_ids(): return self.original_time_series_ids return _get_original_time_series_ids # def save_graph(self): # self.generate_graph() # self.graph.write_png('graph.png')", "order = np.argsort(score) result = not_zero_d_dataframe.index.values[order] if timer is not None: timer.stop() return", "max_level #self.graph = pydot.Dot(graph_type='graph') self.query_ts = None self.query_score_chart = None self.node_shortcuts = None", "> 0: yield node.inverted_file.keys() def get_next_subsequence_id(self): id_ = 
self.next_subsequence_id self.next_subsequence_id += 1 return", "print(\"affinities shape = {0}\".format(shape)) print(\"\") self.n_query_subsequences = 0 self.children = None self._inverted_file =", "= shortcut_list def _build_weights_vector(self): weights_list = [node.weight for node in self.node_shortcuts] self.weights =", "# graph_node = pydot.Node(str(self)) # graph.add_node(graph_node) # if parent_graph_node is not None: #", "prototypes, clustering_threshold): ap = self.run_affinity_propagation(affinities) indices = ap.cluster_centers_indices_ n_clusters = len(ap.cluster_centers_indices_) if indices", "if timer is not None: timer.stop() return result def get_db_subsequences_dict(self): def _get_db_subsequences_dict(): return", "= center self.parent = parent self.get_original_time_series_ids_in_tree = original_time_series_ids_getter self._id = next_node_id_getter() parent_id =", "if parent is not None else None print(\"-- NODE {0} --\".format(self._id)) print(\"parent =", "counter = Counter({subsequence.original_id: 1}) self._inverted_file += counter else: distances = [time_series_twed(subsequence, node.center) for", "prototypes[ap.cluster_centers_indices_] labels = ap.labels_ children = [] for cluster_label, center in zip(range(n_clusters), cluster_centers):", "self.node_shortcuts] d_matrix = np.column_stack(d_list) d_norm = np.linalg.norm(d_matrix, axis=1) d_matrix = (d_matrix.T / d_norm).T", "return self._query_vector @property def _queried_time_series_ids(self): return list(set().union(*self._queried_time_series_ids_iterator())) def prune(self): self._build_node_shorcuts(True) self._build_weights_vector() self._build_d_data_frame() def", "self.node_shortcuts: if node.is_leaf and node.n_query_subsequences > 0: yield node.inverted_file.keys() def get_next_subsequence_id(self): id_ =", "_add_subsequence(self, subsequence): self.root.add_db_subsequence(subsequence) def calculate_inverted_files(self): return self.root.inverted_file class Node: def 
__init__(self, level, max_level,", "if node.is_leaf and node.n_query_subsequences > 0: yield node.inverted_file.keys() def get_next_subsequence_id(self): id_ = self.next_subsequence_id", "@property def n_original_time_series_in_tree(self): return len(self.get_original_time_series_ids_in_tree()) @property def weight(self): w = 0 if self.n_original_time_series_in_node", "smax = np.max(affinities) candidate_preferences = np.linspace(smin, smax, 10) ap = AffinityPropagation(affinity='precomputed') for preference", "def n_original_time_series(self): return len(self.original_time_series_ids) @property def query_vector(self): if self._query_vector is None: q_vector =", "print(\"n_clusters = {0}\".format(n_clusters)) if n_clusters is None or n_clusters == 1: cluster_centers =", "def q(self): if self.n_query_subsequences is None: return None return self.n_query_subsequences*self.weight @property def d_vector(self):", "affinities, center, parent, next_node_id_getter, original_time_series_ids_getter, clustering_threshold, weighted=True): self.level = level self._weighted = weighted", "def _queried_time_series_ids_iterator(self): for node in self.node_shortcuts: if node.is_leaf and node.n_query_subsequences > 0: yield", "= 0 self._weighted = weighted prototype_subsequences = np.array(prototype_subsequences_list) self._build_tree(affinities, prototype_subsequences, clustering_threshold) self._populate_tree(db_time_series) self._build_node_shorcuts()", "except AttributeError: print(\"Attribute Error caught\") print(\"weight = {0}\".format(w)) return w @property def m_vector(self):", "numpy as np from sklearn.cluster import AffinityPropagation #import pydotplus as pydot from collections", "nearest_child = self.children[np.argmin(distances)] nearest_child.add_query_subsequence(subsequence) def add_db_subsequence(self, subsequence): if self.is_leaf: counter = Counter({subsequence.original_id: 1})", "inverted_file(self): if self._inverted_file is None: inverted_file = Counter() for 
child in self.children: inverted_file", "self.n_query_subsequences is None: return None return self.n_query_subsequences*self.weight @property def d_vector(self): return self.weight*self.m_vector def", "self.original_time_series_ids return _get_original_time_series_ids # def save_graph(self): # self.generate_graph() # self.graph.write_png('graph.png') # # def", "distance_utils import time_series_twed import pandas as pd class SubsequenceTree: def __init__(self, max_level, prototype_subsequences_list,", "pydot.Dot(graph_type='graph') self.query_ts = None self.query_score_chart = None self.node_shortcuts = None self.weights = None", "self.n_original_time_series_in_node != 0: w = np.log(self.n_original_time_series_in_tree/ self.n_original_time_series_in_node) try: if not self._weighted: w =", "print(\"\") self.n_query_subsequences = 0 self.children = None self._inverted_file = None if clustering_threshold is", "if self.n_query_subsequences is None: return None return self.n_query_subsequences*self.weight @property def d_vector(self): return self.weight*self.m_vector", "in self.children: child.add_shortcut_to_dict(shortcut_dict) @staticmethod def run_affinity_propagation(affinities): smin = np.min(affinities) smax = np.max(affinities) candidate_preferences", "None print(\"affinities shape = {0}\".format(shape)) print(\"\") self.n_query_subsequences = 0 self.children = None self._inverted_file", "not None else None print(\"n_clusters = {0}\".format(n_clusters)) if n_clusters is None or n_clusters", "np.array([node.q for node in self.node_shortcuts]) q_norm = np.linalg.norm(q_vector) self._query_vector = q_vector / q_norm", "node.center) for node in self.children] nearest_child = self.children[np.argmin(distances)] nearest_child.add_db_subsequence(subsequence) def _generate_inverted_file(self): # original_time_series_id", "return _get_next_node_id def get_original_time_series_ids(self): def _get_original_time_series_ids(): return self.original_time_series_ids return 
_get_original_time_series_ids # def save_graph(self):", "def _generate_inverted_file(self): # original_time_series_id = (subsequence.original_id # for subsequence in prototypes) # self._inverted_file", "= np.linspace(smin, smax, 10) ap = AffinityPropagation(affinity='precomputed') for preference in candidate_preferences: ap.preference =", "self.children: child.add_shortcut_to_dict(shortcut_dict) @staticmethod def run_affinity_propagation(affinities): smin = np.min(affinities) smax = np.max(affinities) candidate_preferences =", "is not None: timer.stop() timer.start() not_zero_node_ids = np.where(self.query_vector != 0)[0] not_zero_query_vector = self.query_vector[not_zero_node_ids]", "def _add_subsequence(self, subsequence): self.root.add_db_subsequence(subsequence) def calculate_inverted_files(self): return self.root.inverted_file class Node: def __init__(self, level,", "indices = ap.cluster_centers_indices_ if indices is not None and len(indices) > 1: break", "child_prototypes, child_affinities, center, self, next_node_id_getter, self.get_original_time_series_ids_in_tree, clustering_threshold) children.append(child) self.children = children def add_query_subsequence(self,", "= Counter({subsequence.original_id: 1}) self._inverted_file += counter else: distances = [time_series_twed(subsequence, node.center) for node", "timer.stop() timer.start() self._query_vector = None for subsequence in subsequences: self.root.add_query_subsequence(subsequence) if timer is", "for preference in candidate_preferences: ap.preference = preference ap.fit(affinities) indices = ap.cluster_centers_indices_ if indices", "is not None: timer.stop() timer.start() for node in self.node_shortcuts: node.n_query_subsequences = 0 if", "q_vector / q_norm return self._query_vector @property def _queried_time_series_ids(self): return list(set().union(*self._queried_time_series_ids_iterator())) def prune(self): self._build_node_shorcuts(True)", "Node(0, self.max_level, prototypes, affinities, None, 
None, self.get_next_node_id(), self.get_original_time_series_ids(), clustering_threshold, weighted=self._weighted) def _populate_tree(self, db_time_series):", "self.weights = None self.d_data_frame = None self._original_time_series_ids = None self._query_vector = None self.n_nodes", "list(self.root.inverted_file) return self._original_time_series_ids @property def n_original_time_series(self): return len(self.original_time_series_ids) @property def query_vector(self): if self._query_vector", "enumerate(db_time_series): print(ts) for subsequence in ts.run_sliding_window(self.time_window, self.time_step): #print(subsequence) self._add_subsequence(subsequence) print(\"{0} time series added\".format(i))", "affinities.shape if affinities is not None else None print(\"affinities shape = {0}\".format(shape)) print(\"\")", "node.n_query_subsequences = 0 if timer is not None: timer.stop() timer.start() self._query_vector = None", "max_level self.center = center self.parent = parent self.get_original_time_series_ids_in_tree = original_time_series_ids_getter self._id = next_node_id_getter()", "import pandas as pd class SubsequenceTree: def __init__(self, max_level, prototype_subsequences_list, affinities, db_time_series, time_window,", "def is_leaf(self): return self.children is None @property def inverted_file(self): if self._inverted_file is None:", "= None self.node_shortcuts = None self.weights = None self.d_data_frame = None self._original_time_series_ids =", "self, next_node_id_getter, self.get_original_time_series_ids_in_tree, clustering_threshold) children.append(child) self.children = children def _generate_children_border_case(self, next_node_id_getter, cluster_centers, clustering_threshold):", "# graph_node)) # if self.children is not None: # for child in self.children:", "d_matrix[d_matrix == np.inf] = 0 self.d_data_frame = pd.DataFrame(np.nan_to_num(d_matrix), index=self.original_time_series_ids) def _add_subsequence(self, subsequence): 
self.root.add_db_subsequence(subsequence)", "clustering_threshold): self.root = Node(0, self.max_level, prototypes, affinities, None, None, self.get_next_node_id(), self.get_original_time_series_ids(), clustering_threshold, weighted=self._weighted)", "# for subsequence in prototypes) # self._inverted_file = Counter(original_time_series_id) self._inverted_file = Counter() #", "0)[0] not_zero_query_vector = self.query_vector[not_zero_node_ids] not_zero_ts_ids = self._queried_time_series_ids not_zero_d_dataframe = self.d_data_frame.loc[not_zero_ts_ids, not_zero_node_ids] if timer", "_queried_time_series_ids(self): return list(set().union(*self._queried_time_series_ids_iterator())) def prune(self): self._build_node_shorcuts(True) self._build_weights_vector() self._build_d_data_frame() def _queried_time_series_ids_iterator(self): for node in", "= weighted prototype_subsequences = np.array(prototype_subsequences_list) self._build_tree(affinities, prototype_subsequences, clustering_threshold) self._populate_tree(db_time_series) self._build_node_shorcuts() self._build_weights_vector() self._build_d_data_frame() @property", "= [time_series_twed(subsequence, node.center) for node in self.children] nearest_child = self.children[np.argmin(distances)] nearest_child.add_query_subsequence(subsequence) def add_db_subsequence(self,", "= q_vector / q_norm return self._query_vector @property def _queried_time_series_ids(self): return list(set().union(*self._queried_time_series_ids_iterator())) def prune(self):", "self._query_vector = None for subsequence in subsequences: self.root.add_query_subsequence(subsequence) if timer is not None:", "if affinities is not None else None print(\"affinities shape = {0}\".format(shape)) print(\"\") self.n_query_subsequences", "in self.node_shortcuts] self.weights = np.array(weights_list) def _build_d_data_frame(self, just_leaves=False): d_list = [node.d_vector for node", "None, None, self.get_next_node_id(), self.get_original_time_series_ids(), 
clustering_threshold, weighted=self._weighted) def _populate_tree(self, db_time_series): print(\"populating tree\") print('time window')", "n_clusters is None or n_clusters == 1: cluster_centers = prototypes self._generate_children_border_case(next_node_id_getter, cluster_centers, clustering_threshold)", "= ids.index(key) m[index] = value return m @property def q(self): if self.n_query_subsequences is", "@property def n_original_time_series(self): return len(self.original_time_series_ids) @property def query_vector(self): if self._query_vector is None: q_vector", "cluster_centers): indices = np.where(labels==cluster_label)[0] child_prototypes = prototypes[indices] child_affinities = affinities[indices][:, indices] child =", "= 1 except AttributeError: print(\"Attribute Error caught\") print(\"weight = {0}\".format(w)) return w @property", "subsequences: self.root.add_query_subsequence(subsequence) if timer is not None: timer.stop() timer.start() not_zero_node_ids = np.where(self.query_vector !=", "= np.where(self.query_vector != 0)[0] not_zero_query_vector = self.query_vector[not_zero_node_ids] not_zero_ts_ids = self._queried_time_series_ids not_zero_d_dataframe = self.d_data_frame.loc[not_zero_ts_ids,", "weighted self.max_level = max_level self.center = center self.parent = parent self.get_original_time_series_ids_in_tree = original_time_series_ids_getter", "None self.d_data_frame = None self._original_time_series_ids = None self._query_vector = None self.n_nodes = 0", "[v for v in shortcut_dict.values() if not just_leaves or v.is_leaf] self.node_shortcuts = shortcut_list", "return id_ def make_query(self, time_series, timer=None): if timer is not None: timer.start() subsequences", "timer is not None: timer.stop() return result def get_db_subsequences_dict(self): def _get_db_subsequences_dict(): return self.db_subsequences_dict", "np.linalg.norm(d_matrix, axis=1) d_matrix = (d_matrix.T / d_norm).T d_matrix[d_matrix == np.inf] = 0 self.d_data_frame", "== 1: 
cluster_centers = prototypes self._generate_children_border_case(next_node_id_getter, cluster_centers, clustering_threshold) return cluster_centers = prototypes[ap.cluster_centers_indices_] labels", "Counter(original_time_series_id) self._inverted_file = Counter() # def add_to_graph(self, parent_graph_node, graph): # graph_node = pydot.Node(str(self))", "is not None: timer.stop() timer.start() self._query_vector = None for subsequence in subsequences: self.root.add_query_subsequence(subsequence)", "1, self.max_level, child_prototypes, child_affinities, center, self, next_node_id_getter, self.get_original_time_series_ids_in_tree, clustering_threshold) children.append(child) self.children = children", "nearest_child = self.children[np.argmin(distances)] nearest_child.add_db_subsequence(subsequence) def _generate_inverted_file(self): # original_time_series_id = (subsequence.original_id # for subsequence", "candidate_preferences: ap.preference = preference ap.fit(affinities) indices = ap.cluster_centers_indices_ if indices is not None", "= self.children[np.argmin(distances)] nearest_child.add_db_subsequence(subsequence) def _generate_inverted_file(self): # original_time_series_id = (subsequence.original_id # for subsequence in", "SubsequenceTree: def __init__(self, max_level, prototype_subsequences_list, affinities, db_time_series, time_window, time_step, clustering_threshold=1, weighted=True): self.time_window =", "return w @property def m_vector(self): m = np.zeros(self.n_original_time_series_in_tree) ids = self.get_original_time_series_ids_in_tree() for key,", "import Counter from distance_utils import time_series_twed import pandas as pd class SubsequenceTree: def", "def add_query_subsequence(self, subsequence): self.n_query_subsequences += 1 if not self.is_leaf: distances = [time_series_twed(subsequence, node.center)", "weighted=True): self.time_window = time_window self.time_step = time_step self.max_level = max_level #self.graph = pydot.Dot(graph_type='graph')", 
"else None print(\"affinities shape = {0}\".format(shape)) print(\"\") self.n_query_subsequences = 0 self.children = None", "= max_level #self.graph = pydot.Dot(graph_type='graph') self.query_ts = None self.query_score_chart = None self.node_shortcuts =", "or n_clusters == 1: cluster_centers = prototypes self._generate_children_border_case(next_node_id_getter, cluster_centers, clustering_threshold) return cluster_centers =", "self.children] nearest_child = self.children[np.argmin(distances)] nearest_child.add_db_subsequence(subsequence) def _generate_inverted_file(self): # original_time_series_id = (subsequence.original_id # for", "None self.query_score_chart = None self.node_shortcuts = None self.weights = None self.d_data_frame = None", "= np.max(affinities) candidate_preferences = np.linspace(smin, smax, 10) ap = AffinityPropagation(affinity='precomputed') for preference in", "def _build_weights_vector(self): weights_list = [node.weight for node in self.node_shortcuts] self.weights = np.array(weights_list) def", "# graph.add_edge(pydot.Edge(parent_graph_node, # graph_node)) # if self.children is not None: # for child", "np.min(affinities) smax = np.max(affinities) candidate_preferences = np.linspace(smin, smax, 10) ap = AffinityPropagation(affinity='precomputed') for" ]
[ "{'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} del dc['d1'] # second gen", "2} dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} dc.setdefault('d1', {})['domain']", "4} assert dc.get('d1', {}) == {'domain': 1} assert dc.get('d3', {}) == {'domain': [3,", "unittest class TestDomainCache(unittest.TestCase): def setUp(self): logging.basicConfig(level=logging.DEBUG) self.conn = Connection(host=\"hbase-docker\") if b'domain_metadata' not in", "should happen dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert", "4} assert dc.setdefault('d1', {}) == {'domain': 1} assert dc.setdefault('d5', {'domain': 6}) == {'domain':", "DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2'] = {'domain': 2} dc['d3'] =", "{'domain': 4} assert dc['d1'] == {'domain': 1} assert dc['d2'] == {'domain': 2} assert", "'d4' in dc def test_pop(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain':", "def test_deletion(self): dc = DomainCache(2, self.conn, 'domain_metadata') with self.assertRaises(KeyError): del dc['d1'] dc['d1'] =", "assert dc.pop('d3') == {'domain': [3, 2, 1]} assert 'd3' not in dc dc.flush()", "DomainCache(2, self.conn, 'domain_metadata') with self.assertRaises(KeyError): dc[''] = {'test':1} def test_deletion(self): dc = DomainCache(2,", "1]} def test_domain_cache_setdefault(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2']", "Connection import logging import unittest class TestDomainCache(unittest.TestCase): def setUp(self): logging.basicConfig(level=logging.DEBUG) self.conn = Connection(host=\"hbase-docker\")", "def test_domain_cache_get_with_default(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2'] =", "second gen assert 'd3' in dc # first gen dc.flush() assert 'd4' in", "dc[''] = {'test':1} def test_deletion(self): dc = DomainCache(2, self.conn, 'domain_metadata') with self.assertRaises(KeyError): del", "1]} assert 'd3' not in 
dc dc.flush() assert dc.pop('d4') == {'domain': 4} assert", "[3, 2, 1]} assert dc['d4'] == {'domain': 4} def test_domain_cache_get_with_default(self): dc = DomainCache(2,", "dc['d2'] = {'domain': 2} # eviction should happen dc['d3'] = {'domain': [3, 2,", "dc # second gen assert 'd3' in dc # first gen dc.flush() assert", "assert dc['d1'] == {'domain': 1} assert dc['d2'] == {'domain': 2} assert dc['d3'] ==", "dc['d4'] = {'domain': 4} assert 'd1' in dc # second gen assert 'd3'", "2, 1]} dc['d4'] = {'domain': 4} assert dc.pop('d1') == {'domain': 1} assert 'd1'", "{'domain': 1} assert dc['d2'] == {'domain': 2} assert dc['d3'] == {'domain': [3, 2,", "2} dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert 'd1'", "dc.get('d3', {}) == {'domain': [3, 2, 1]} def test_domain_cache_setdefault(self): dc = DomainCache(2, self.conn,", "del dc['d1'] dc['d1'] = {'domain': 1} dc['d2'] = {'domain': 2} dc['d3'] = {'domain':", "{}) == {'domain': 2} def test_empty_key(self): dc = DomainCache(2, self.conn, 'domain_metadata') with self.assertRaises(KeyError):", "1]} dc['d4'] = {'domain': 4} assert dc.pop('d1') == {'domain': 1} assert 'd1' not", "dc = DomainCache(2, self.conn, 'domain_metadata') with self.assertRaises(KeyError): dc[''] = {'test':1} def test_deletion(self): dc", "batch_size=3) dc['d1'] = {'domain': 1} dc['d2'] = {'domain': 2} dc['d3'] = {'domain': [3,", "= {'domain': 4} del dc['d1'] # second gen del dc['d3'] # first gen", "DomainCache from happybase import Connection import logging import unittest class TestDomainCache(unittest.TestCase): def setUp(self):", "{'domain': 1} assert dc.get('d3', {}) == {'domain': [3, 2, 1]} def test_domain_cache_setdefault(self): dc", "assert 'd3' in dc # first gen dc.flush() assert 'd4' in dc def", "assert dc['d3'] == {'domain': [3, 2, 1]} assert dc['d4'] == {'domain': 4} def", "from frontera.contrib.backends.hbase.domaincache import DomainCache from happybase import Connection import logging import unittest class", "first gen 
dc.flush() assert 'd4' in dc def test_pop(self): dc = DomainCache(2, self.conn,", "not in self.conn.tables(): self.conn.create_table('domain_metadata', { 'm': {'max_versions': 1, 'block_cache_enabled': 1,} }) t =", "utf-8 -*- from frontera.contrib.backends.hbase.domaincache import DomainCache from happybase import Connection import logging import", "{'domain': 1} dc['d2'] = {'domain': 2} # eviction should happen dc['d3'] = {'domain':", "= {'domain': 4} assert 'd1' in dc # second gen assert 'd3' in", "def test_domain_cache_setdefault_with_second_gen_flush(self): dc = DomainCache(2, self.conn, 'domain_metadata', batch_size=3) dc['d1'] = {'domain': 1} dc['d2']", "dc['d4'] = {'domain': 4} assert dc['d1'] == {'domain': 1} assert dc['d2'] == {'domain':", "dc['d4'] == {'domain': 4} def test_domain_cache_get_with_default(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] =", "dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert dc.pop('d1') ==", "== {'domain': 1} assert dc['d2'] == {'domain': 2} assert dc['d3'] == {'domain': [3,", "{'domain': 4} del dc['d1'] # second gen del dc['d3'] # first gen dc.flush()", "second gen del dc['d3'] # first gen dc.flush() del dc['d4'] # hbase def", "assert dc.pop('d1') == {'domain': 1} assert 'd1' not in dc assert dc.pop('d3') ==", "assert 'd3' not in dc dc.flush() assert dc.pop('d4') == {'domain': 4} assert 'd4'", "class TestDomainCache(unittest.TestCase): def setUp(self): logging.basicConfig(level=logging.DEBUG) self.conn = Connection(host=\"hbase-docker\") if b'domain_metadata' not in self.conn.tables():", "{'domain': [3, 2, 1]} def test_domain_cache_setdefault_with_second_gen_flush(self): dc = DomainCache(2, self.conn, 'domain_metadata', batch_size=3) dc['d1']", "{'domain': 1} assert dc.setdefault('d5', {'domain': 6}) == {'domain': 6} dc.flush() assert dc.setdefault('d3', {})", "'block_cache_enabled': 1,} }) t = self.conn.table('domain_metadata') t.delete('d1') t.delete('d2') t.delete('d3') t.delete('d4') def 
test_domain_cache_both_generations(self): dc", "Connection(host=\"hbase-docker\") if b'domain_metadata' not in self.conn.tables(): self.conn.create_table('domain_metadata', { 'm': {'max_versions': 1, 'block_cache_enabled': 1,}", "+= 1 assert dc.setdefault('d1', {}) == {'domain': 2} def test_empty_key(self): dc = DomainCache(2,", "[3, 2, 1]} dc['d4'] = {'domain': 4} assert dc.setdefault('d1', {}) == {'domain': 1}", "dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert dc['d1'] ==", "dc['d2'] == {'domain': 2} assert dc['d3'] == {'domain': [3, 2, 1]} assert dc['d4']", "{'domain': 6} dc.flush() assert dc.setdefault('d3', {}) == {'domain': [3, 2, 1]} def test_domain_cache_setdefault_with_second_gen_flush(self):", "{'domain': 2} def test_empty_key(self): dc = DomainCache(2, self.conn, 'domain_metadata') with self.assertRaises(KeyError): dc[''] =", "# hbase def test_contains(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1}", "[3, 2, 1]} dc['d4'] = {'domain': 4} assert dc.get('d1', {}) == {'domain': 1}", "test_contains(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2'] = {'domain':", "dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert 'd1' in", "test_domain_cache_get_with_default(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2'] = {'domain':", "= {'domain': 2} # eviction should happen dc['d3'] = {'domain': [3, 2, 1]}", "assert dc.setdefault('d3', {}) == {'domain': [3, 2, 1]} def test_domain_cache_setdefault_with_second_gen_flush(self): dc = DomainCache(2,", "self.assertRaises(KeyError): del dc['d1'] dc['d1'] = {'domain': 1} dc['d2'] = {'domain': 2} dc['d3'] =", "# first gen dc.flush() assert 'd4' in dc def test_pop(self): dc = DomainCache(2,", "}) t = self.conn.table('domain_metadata') t.delete('d1') t.delete('d2') t.delete('d3') t.delete('d4') def test_domain_cache_both_generations(self): dc = DomainCache(2,", "1]} dc['d4'] = 
{'domain': 4} assert dc['d1'] == {'domain': 1} assert dc['d2'] ==", "dc['d3'] == {'domain': [3, 2, 1]} assert dc['d4'] == {'domain': 4} def test_domain_cache_get_with_default(self):", "= DomainCache(2, self.conn, 'domain_metadata') with self.assertRaises(KeyError): dc[''] = {'test':1} def test_deletion(self): dc =", "= {'test':1} def test_deletion(self): dc = DomainCache(2, self.conn, 'domain_metadata') with self.assertRaises(KeyError): del dc['d1']", "test_pop(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2'] = {'domain':", "2, 1]} def test_domain_cache_setdefault(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1}", "[3, 2, 1]} dc['d4'] = {'domain': 4} del dc['d1'] # second gen del", "DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2'] = {'domain': 2} # eviction", "# eviction should happen dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain':", "in dc # second gen assert 'd3' in dc # first gen dc.flush()", "= DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2'] = {'domain': 2} dc['d3']", "dc.setdefault('d1', {}) == {'domain': 1} assert dc.setdefault('d5', {'domain': 6}) == {'domain': 6} dc.flush()", "{'domain': 1} assert 'd1' not in dc assert dc.pop('d3') == {'domain': [3, 2,", "assert dc.get('d1', {}) == {'domain': 1} assert dc.get('d3', {}) == {'domain': [3, 2,", "self.conn, 'domain_metadata') with self.assertRaises(KeyError): dc[''] = {'test':1} def test_deletion(self): dc = DomainCache(2, self.conn,", "6}) == {'domain': 6} dc.flush() assert dc.setdefault('d3', {}) == {'domain': [3, 2, 1]}", "== {'domain': [3, 2, 1]} def test_domain_cache_setdefault(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1']", "1} dc['d2'] = {'domain': 2} # eviction should happen dc['d3'] = {'domain': [3,", "test_domain_cache_both_generations(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2'] = {'domain':", 
"# first gen dc.flush() del dc['d4'] # hbase def test_contains(self): dc = DomainCache(2,", "== {'domain': 2} assert dc['d3'] == {'domain': [3, 2, 1]} assert dc['d4'] ==", "= {'domain': 4} assert dc.setdefault('d1', {}) == {'domain': 1} assert dc.setdefault('d5', {'domain': 6})", "'domain_metadata') with self.assertRaises(KeyError): dc[''] = {'test':1} def test_deletion(self): dc = DomainCache(2, self.conn, 'domain_metadata')", "== {'domain': 6} dc.flush() assert dc.setdefault('d3', {}) == {'domain': [3, 2, 1]} def", "assert dc['d4'] == {'domain': 4} def test_domain_cache_get_with_default(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1']", "dc.setdefault('d1', {}) == {'domain': 2} def test_empty_key(self): dc = DomainCache(2, self.conn, 'domain_metadata') with", "dc.setdefault('d1', {})['domain'] += 1 assert dc.setdefault('d1', {}) == {'domain': 2} def test_empty_key(self): dc", "1]} assert dc['d4'] == {'domain': 4} def test_domain_cache_get_with_default(self): dc = DomainCache(2, self.conn, 'domain_metadata')", "= {'domain': 4} assert dc['d1'] == {'domain': 1} assert dc['d2'] == {'domain': 2}", "== {'domain': 1} assert dc.get('d3', {}) == {'domain': [3, 2, 1]} def test_domain_cache_setdefault(self):", "{}) == {'domain': 1} assert dc.setdefault('d5', {'domain': 6}) == {'domain': 6} dc.flush() assert", "'d3' not in dc dc.flush() assert dc.pop('d4') == {'domain': 4} assert 'd4' not", "dc['d4'] = {'domain': 4} assert dc.setdefault('d1', {}) == {'domain': 1} assert dc.setdefault('d5', {'domain':", "= {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert 'd1' in dc", "[3, 2, 1]} def test_domain_cache_setdefault_with_second_gen_flush(self): dc = DomainCache(2, self.conn, 'domain_metadata', batch_size=3) dc['d1'] =", "dc['d1'] # second gen del dc['d3'] # first gen dc.flush() del dc['d4'] #", "1} assert dc['d2'] == {'domain': 2} assert dc['d3'] == {'domain': [3, 2, 1]}", "dc['d4'] # hbase def test_contains(self): dc = DomainCache(2, self.conn, 
'domain_metadata') dc['d1'] = {'domain':", "'domain_metadata', batch_size=3) dc['d1'] = {'domain': 1} dc['d2'] = {'domain': 2} dc['d3'] = {'domain':", "[3, 2, 1]} assert 'd3' not in dc dc.flush() assert dc.pop('d4') == {'domain':", "<filename>tests/contrib/backends/hbase/test_domain_cache.py<gh_stars>1000+ # -*- coding: utf-8 -*- from frontera.contrib.backends.hbase.domaincache import DomainCache from happybase import", "happybase import Connection import logging import unittest class TestDomainCache(unittest.TestCase): def setUp(self): logging.basicConfig(level=logging.DEBUG) self.conn", "{'max_versions': 1, 'block_cache_enabled': 1,} }) t = self.conn.table('domain_metadata') t.delete('d1') t.delete('d2') t.delete('d3') t.delete('d4') def", "= DomainCache(2, self.conn, 'domain_metadata', batch_size=3) dc['d1'] = {'domain': 1} dc['d2'] = {'domain': 2}", "== {'domain': 1} assert 'd1' not in dc assert dc.pop('d3') == {'domain': [3,", "b'domain_metadata' not in self.conn.tables(): self.conn.create_table('domain_metadata', { 'm': {'max_versions': 1, 'block_cache_enabled': 1,} }) t", "== {'domain': [3, 2, 1]} def test_domain_cache_setdefault_with_second_gen_flush(self): dc = DomainCache(2, self.conn, 'domain_metadata', batch_size=3)", "'d1' in dc # second gen assert 'd3' in dc # first gen", "dc.flush() assert 'd4' in dc def test_pop(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1']", "{'domain': 6}) == {'domain': 6} dc.flush() assert dc.setdefault('d3', {}) == {'domain': [3, 2,", "2, 1]} dc['d4'] = {'domain': 4} assert dc.setdefault('d1', {}) == {'domain': 1} assert", "del dc['d3'] # first gen dc.flush() del dc['d4'] # hbase def test_contains(self): dc", "[3, 2, 1]} dc['d4'] = {'domain': 4} assert dc['d1'] == {'domain': 1} assert", "{'domain': [3, 2, 1]} def test_domain_cache_setdefault(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] =", "setUp(self): logging.basicConfig(level=logging.DEBUG) self.conn = Connection(host=\"hbase-docker\") 
if b'domain_metadata' not in self.conn.tables(): self.conn.create_table('domain_metadata', { 'm':", "dc['d4'] = {'domain': 4} assert dc.get('d1', {}) == {'domain': 1} assert dc.get('d3', {})", "1]} dc['d4'] = {'domain': 4} dc.setdefault('d1', {})['domain'] += 1 assert dc.setdefault('d1', {}) ==", "2} assert dc['d3'] == {'domain': [3, 2, 1]} assert dc['d4'] == {'domain': 4}", "def test_domain_cache_both_generations(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2'] =", "= {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert dc.get('d1', {}) ==", "dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} dc.setdefault('d1', {})['domain'] +=", "dc.pop('d1') == {'domain': 1} assert 'd1' not in dc assert dc.pop('d3') == {'domain':", "{'domain': 4} assert dc.get('d1', {}) == {'domain': 1} assert dc.get('d3', {}) == {'domain':", "dc # first gen dc.flush() assert 'd4' in dc def test_pop(self): dc =", "= {'domain': 4} assert dc.pop('d1') == {'domain': 1} assert 'd1' not in dc", "self.conn.tables(): self.conn.create_table('domain_metadata', { 'm': {'max_versions': 1, 'block_cache_enabled': 1,} }) t = self.conn.table('domain_metadata') t.delete('d1')", "4} def test_domain_cache_get_with_default(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2']", "= {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert dc.pop('d1') == {'domain':", "{'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert 'd1' in dc #", "{'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert dc.setdefault('d1', {}) == {'domain':", "dc.pop('d3') == {'domain': [3, 2, 1]} assert 'd3' not in dc dc.flush() assert", "= {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} dc.setdefault('d1', {})['domain'] += 1", "del dc['d1'] # second gen del dc['d3'] # first gen dc.flush() del dc['d4']", "happen dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert dc['d1']", "dc.flush() del dc['d4'] # hbase def test_contains(self): dc = DomainCache(2, 
self.conn, 'domain_metadata') dc['d1']", "2} dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} del dc['d1']", "= {'domain': 1} dc['d2'] = {'domain': 2} dc['d3'] = {'domain': [3, 2, 1]}", "gen del dc['d3'] # first gen dc.flush() del dc['d4'] # hbase def test_contains(self):", "{'domain': 4} assert dc.setdefault('d1', {}) == {'domain': 1} assert dc.setdefault('d5', {'domain': 6}) ==", "{'domain': 4} assert dc.pop('d1') == {'domain': 1} assert 'd1' not in dc assert", "{ 'm': {'max_versions': 1, 'block_cache_enabled': 1,} }) t = self.conn.table('domain_metadata') t.delete('d1') t.delete('d2') t.delete('d3')", "if b'domain_metadata' not in self.conn.tables(): self.conn.create_table('domain_metadata', { 'm': {'max_versions': 1, 'block_cache_enabled': 1,} })", "self.conn.table('domain_metadata') t.delete('d1') t.delete('d2') t.delete('d3') t.delete('d4') def test_domain_cache_both_generations(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1']", "assert 'd1' not in dc assert dc.pop('d3') == {'domain': [3, 2, 1]} assert", "dc assert dc.pop('d3') == {'domain': [3, 2, 1]} assert 'd3' not in dc", "assert dc.setdefault('d5', {'domain': 6}) == {'domain': 6} dc.flush() assert dc.setdefault('d3', {}) == {'domain':", "dc['d1'] dc['d1'] = {'domain': 1} dc['d2'] = {'domain': 2} dc['d3'] = {'domain': [3,", "{'domain': 4} assert 'd1' in dc # second gen assert 'd3' in dc", "== {'domain': 4} def test_domain_cache_get_with_default(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain':", "== {'domain': 2} def test_empty_key(self): dc = DomainCache(2, self.conn, 'domain_metadata') with self.assertRaises(KeyError): dc['']", "t.delete('d3') t.delete('d4') def test_domain_cache_both_generations(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1}", "with self.assertRaises(KeyError): del dc['d1'] dc['d1'] = {'domain': 1} dc['d2'] = {'domain': 2} dc['d3']", "self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2'] 
= {'domain': 2} dc['d3'] = {'domain':", "2} dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert dc.pop('d1')", "1} assert 'd1' not in dc assert dc.pop('d3') == {'domain': [3, 2, 1]}", "[3, 2, 1]} dc['d4'] = {'domain': 4} assert dc.pop('d1') == {'domain': 1} assert", "DomainCache(2, self.conn, 'domain_metadata', batch_size=3) dc['d1'] = {'domain': 1} dc['d2'] = {'domain': 2} dc['d3']", "dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} del dc['d1'] #", "dc['d4'] = {'domain': 4} assert dc.pop('d1') == {'domain': 1} assert 'd1' not in", "self.assertRaises(KeyError): dc[''] = {'test':1} def test_deletion(self): dc = DomainCache(2, self.conn, 'domain_metadata') with self.assertRaises(KeyError):", "'m': {'max_versions': 1, 'block_cache_enabled': 1,} }) t = self.conn.table('domain_metadata') t.delete('d1') t.delete('d2') t.delete('d3') t.delete('d4')", "hbase def test_contains(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2']", "t.delete('d1') t.delete('d2') t.delete('d3') t.delete('d4') def test_domain_cache_both_generations(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] =", "dc['d4'] = {'domain': 4} dc.setdefault('d1', {})['domain'] += 1 assert dc.setdefault('d1', {}) == {'domain':", "= {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert dc['d1'] == {'domain':", "2, 1]} dc['d4'] = {'domain': 4} assert dc['d1'] == {'domain': 1} assert dc['d2']", "def setUp(self): logging.basicConfig(level=logging.DEBUG) self.conn = Connection(host=\"hbase-docker\") if b'domain_metadata' not in self.conn.tables(): self.conn.create_table('domain_metadata', {", "dc['d1'] == {'domain': 1} assert dc['d2'] == {'domain': 2} assert dc['d3'] == {'domain':", "= {'domain': 4} assert dc.get('d1', {}) == {'domain': 1} assert dc.get('d3', {}) ==", "test_deletion(self): dc = DomainCache(2, self.conn, 'domain_metadata') with self.assertRaises(KeyError): del dc['d1'] dc['d1'] = {'domain':", 
"logging.basicConfig(level=logging.DEBUG) self.conn = Connection(host=\"hbase-docker\") if b'domain_metadata' not in self.conn.tables(): self.conn.create_table('domain_metadata', { 'm': {'max_versions':", "'domain_metadata') dc['d1'] = {'domain': 1} dc['d2'] = {'domain': 2} # eviction should happen", "dc.get('d1', {}) == {'domain': 1} assert dc.get('d3', {}) == {'domain': [3, 2, 1]}", "dc['d3'] # first gen dc.flush() del dc['d4'] # hbase def test_contains(self): dc =", "test_domain_cache_setdefault(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2'] = {'domain':", "gen dc.flush() del dc['d4'] # hbase def test_contains(self): dc = DomainCache(2, self.conn, 'domain_metadata')", "{'domain': 4} dc.setdefault('d1', {})['domain'] += 1 assert dc.setdefault('d1', {}) == {'domain': 2} def", "{'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert dc.pop('d1') == {'domain': 1}", "2, 1]} def test_domain_cache_setdefault_with_second_gen_flush(self): dc = DomainCache(2, self.conn, 'domain_metadata', batch_size=3) dc['d1'] = {'domain':", "from happybase import Connection import logging import unittest class TestDomainCache(unittest.TestCase): def setUp(self): logging.basicConfig(level=logging.DEBUG)", "{'domain': 4} def test_domain_cache_get_with_default(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1}", "not in dc dc.flush() assert dc.pop('d4') == {'domain': 4} assert 'd4' not in", "dc.setdefault('d3', {}) == {'domain': [3, 2, 1]} def test_domain_cache_setdefault_with_second_gen_flush(self): dc = DomainCache(2, self.conn,", "1]} dc['d4'] = {'domain': 4} del dc['d1'] # second gen del dc['d3'] #", "self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2'] = {'domain': 2} # eviction should", "'d1' not in dc assert dc.pop('d3') == {'domain': [3, 2, 1]} assert 'd3'", "# second gen assert 'd3' in dc # first gen dc.flush() assert 'd4'", "= {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} del dc['d1'] # second", 
"4} del dc['d1'] # second gen del dc['d3'] # first gen dc.flush() del", "2, 1]} assert 'd3' not in dc dc.flush() assert dc.pop('d4') == {'domain': 4}", "2, 1]} assert dc['d4'] == {'domain': 4} def test_domain_cache_get_with_default(self): dc = DomainCache(2, self.conn,", "dc def test_pop(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2']", "assert 'd4' in dc def test_pop(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] =", "dc['d2'] = {'domain': 2} dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain':", "dc['d1'] = {'domain': 1} dc['d2'] = {'domain': 2} dc['d3'] = {'domain': [3, 2,", "2, 1]} dc['d4'] = {'domain': 4} dc.setdefault('d1', {})['domain'] += 1 assert dc.setdefault('d1', {})", "== {'domain': 1} assert dc.setdefault('d5', {'domain': 6}) == {'domain': 6} dc.flush() assert dc.setdefault('d3',", "dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert dc.setdefault('d1', {})", "-*- coding: utf-8 -*- from frontera.contrib.backends.hbase.domaincache import DomainCache from happybase import Connection import", "'d3' in dc # first gen dc.flush() assert 'd4' in dc def test_pop(self):", "gen assert 'd3' in dc # first gen dc.flush() assert 'd4' in dc", "{'domain': 2} dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} del", "assert dc['d2'] == {'domain': 2} assert dc['d3'] == {'domain': [3, 2, 1]} assert", "{'domain': 2} dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} dc.setdefault('d1',", "[3, 2, 1]} dc['d4'] = {'domain': 4} dc.setdefault('d1', {})['domain'] += 1 assert dc.setdefault('d1',", "self.conn, 'domain_metadata') with self.assertRaises(KeyError): del dc['d1'] dc['d1'] = {'domain': 1} dc['d2'] = {'domain':", "dc['d1'] = {'domain': 1} dc['d2'] = {'domain': 2} # eviction should happen dc['d3']", "{}) == {'domain': [3, 2, 1]} def test_domain_cache_setdefault_with_second_gen_flush(self): dc = DomainCache(2, self.conn, 'domain_metadata',", "4} assert dc['d1'] == {'domain': 1} 
assert dc['d2'] == {'domain': 2} assert dc['d3']", "logging import unittest class TestDomainCache(unittest.TestCase): def setUp(self): logging.basicConfig(level=logging.DEBUG) self.conn = Connection(host=\"hbase-docker\") if b'domain_metadata'", "1]} dc['d4'] = {'domain': 4} assert dc.setdefault('d1', {}) == {'domain': 1} assert dc.setdefault('d5',", "not in dc assert dc.pop('d3') == {'domain': [3, 2, 1]} assert 'd3' not", "gen dc.flush() assert 'd4' in dc def test_pop(self): dc = DomainCache(2, self.conn, 'domain_metadata')", "2} dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert dc.setdefault('d1',", "1]} dc['d4'] = {'domain': 4} assert dc.get('d1', {}) == {'domain': 1} assert dc.get('d3',", "import logging import unittest class TestDomainCache(unittest.TestCase): def setUp(self): logging.basicConfig(level=logging.DEBUG) self.conn = Connection(host=\"hbase-docker\") if", "1 assert dc.setdefault('d1', {}) == {'domain': 2} def test_empty_key(self): dc = DomainCache(2, self.conn,", "= {'domain': 4} dc.setdefault('d1', {})['domain'] += 1 assert dc.setdefault('d1', {}) == {'domain': 2}", "[3, 2, 1]} def test_domain_cache_setdefault(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain':", "in dc dc.flush() assert dc.pop('d4') == {'domain': 4} assert 'd4' not in dc", "== {'domain': [3, 2, 1]} assert 'd3' not in dc dc.flush() assert dc.pop('d4')", "coding: utf-8 -*- from frontera.contrib.backends.hbase.domaincache import DomainCache from happybase import Connection import logging", "dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert dc.get('d1', {})", "1} assert dc.setdefault('d5', {'domain': 6}) == {'domain': 6} dc.flush() assert dc.setdefault('d3', {}) ==", "1]} def test_domain_cache_setdefault_with_second_gen_flush(self): dc = DomainCache(2, self.conn, 'domain_metadata', batch_size=3) dc['d1'] = {'domain': 1}", "6} dc.flush() assert dc.setdefault('d3', {}) == {'domain': [3, 2, 1]} def 
test_domain_cache_setdefault_with_second_gen_flush(self): dc", "= {'domain': 2} dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4}", "import DomainCache from happybase import Connection import logging import unittest class TestDomainCache(unittest.TestCase): def", "'domain_metadata') with self.assertRaises(KeyError): del dc['d1'] dc['d1'] = {'domain': 1} dc['d2'] = {'domain': 2}", "first gen dc.flush() del dc['d4'] # hbase def test_contains(self): dc = DomainCache(2, self.conn,", "2} dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert dc.get('d1',", "{'domain': [3, 2, 1]} assert dc['d4'] == {'domain': 4} def test_domain_cache_get_with_default(self): dc =", "{'test':1} def test_deletion(self): dc = DomainCache(2, self.conn, 'domain_metadata') with self.assertRaises(KeyError): del dc['d1'] dc['d1']", "1]} dc['d4'] = {'domain': 4} assert 'd1' in dc # second gen assert", "{}) == {'domain': [3, 2, 1]} def test_domain_cache_setdefault(self): dc = DomainCache(2, self.conn, 'domain_metadata')", "frontera.contrib.backends.hbase.domaincache import DomainCache from happybase import Connection import logging import unittest class TestDomainCache(unittest.TestCase):", "1, 'block_cache_enabled': 1,} }) t = self.conn.table('domain_metadata') t.delete('d1') t.delete('d2') t.delete('d3') t.delete('d4') def test_domain_cache_both_generations(self):", "'domain_metadata') dc['d1'] = {'domain': 1} dc['d2'] = {'domain': 2} dc['d3'] = {'domain': [3,", "in dc assert dc.pop('d3') == {'domain': [3, 2, 1]} assert 'd3' not in", "{'domain': [3, 2, 1]} assert 'd3' not in dc dc.flush() assert dc.pop('d4') ==", "{})['domain'] += 1 assert dc.setdefault('d1', {}) == {'domain': 2} def test_empty_key(self): dc =", "[3, 2, 1]} dc['d4'] = {'domain': 4} assert 'd1' in dc # second", "{'domain': 2} # eviction should happen dc['d3'] = {'domain': [3, 2, 1]} dc['d4']", "self.conn.create_table('domain_metadata', { 'm': {'max_versions': 1, 'block_cache_enabled': 1,} }) t = 
self.conn.table('domain_metadata') t.delete('d1') t.delete('d2')", "== {'domain': [3, 2, 1]} assert dc['d4'] == {'domain': 4} def test_domain_cache_get_with_default(self): dc", "= Connection(host=\"hbase-docker\") if b'domain_metadata' not in self.conn.tables(): self.conn.create_table('domain_metadata', { 'm': {'max_versions': 1, 'block_cache_enabled':", "2} def test_empty_key(self): dc = DomainCache(2, self.conn, 'domain_metadata') with self.assertRaises(KeyError): dc[''] = {'test':1}", "def test_pop(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2'] =", "DomainCache(2, self.conn, 'domain_metadata') with self.assertRaises(KeyError): del dc['d1'] dc['d1'] = {'domain': 1} dc['d2'] =", "4} assert dc.pop('d1') == {'domain': 1} assert 'd1' not in dc assert dc.pop('d3')", "TestDomainCache(unittest.TestCase): def setUp(self): logging.basicConfig(level=logging.DEBUG) self.conn = Connection(host=\"hbase-docker\") if b'domain_metadata' not in self.conn.tables(): self.conn.create_table('domain_metadata',", "def test_contains(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2'] =", "= DomainCache(2, self.conn, 'domain_metadata') with self.assertRaises(KeyError): del dc['d1'] dc['d1'] = {'domain': 1} dc['d2']", "in dc def test_pop(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1}", "assert 'd1' in dc # second gen assert 'd3' in dc # first", "{'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} dc.setdefault('d1', {})['domain'] += 1 assert", "{'domain': 2} dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert", "self.conn = Connection(host=\"hbase-docker\") if b'domain_metadata' not in self.conn.tables(): self.conn.create_table('domain_metadata', { 'm': {'max_versions': 1,", "def test_domain_cache_setdefault(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2'] =", "4} dc.setdefault('d1', {})['domain'] += 1 assert 
dc.setdefault('d1', {}) == {'domain': 2} def test_empty_key(self):", "assert dc.setdefault('d1', {}) == {'domain': 1} assert dc.setdefault('d5', {'domain': 6}) == {'domain': 6}", "dc = DomainCache(2, self.conn, 'domain_metadata') with self.assertRaises(KeyError): del dc['d1'] dc['d1'] = {'domain': 1}", "= {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert dc.setdefault('d1', {}) ==", "t.delete('d2') t.delete('d3') t.delete('d4') def test_domain_cache_both_generations(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain':", "4} assert 'd1' in dc # second gen assert 'd3' in dc #", "with self.assertRaises(KeyError): dc[''] = {'test':1} def test_deletion(self): dc = DomainCache(2, self.conn, 'domain_metadata') with", "-*- from frontera.contrib.backends.hbase.domaincache import DomainCache from happybase import Connection import logging import unittest", "= {'domain': 1} dc['d2'] = {'domain': 2} # eviction should happen dc['d3'] =", "eviction should happen dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] = {'domain': 4}", "{'domain': 2} assert dc['d3'] == {'domain': [3, 2, 1]} assert dc['d4'] == {'domain':", "test_empty_key(self): dc = DomainCache(2, self.conn, 'domain_metadata') with self.assertRaises(KeyError): dc[''] = {'test':1} def test_deletion(self):", "dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2'] = {'domain': 2}", "def test_empty_key(self): dc = DomainCache(2, self.conn, 'domain_metadata') with self.assertRaises(KeyError): dc[''] = {'test':1} def", "1,} }) t = self.conn.table('domain_metadata') t.delete('d1') t.delete('d2') t.delete('d3') t.delete('d4') def test_domain_cache_both_generations(self): dc =", "dc.flush() assert dc.setdefault('d3', {}) == {'domain': [3, 2, 1]} def test_domain_cache_setdefault_with_second_gen_flush(self): dc =", "test_domain_cache_setdefault_with_second_gen_flush(self): dc = DomainCache(2, self.conn, 'domain_metadata', batch_size=3) dc['d1'] = {'domain': 1} dc['d2'] =", "= 
self.conn.table('domain_metadata') t.delete('d1') t.delete('d2') t.delete('d3') t.delete('d4') def test_domain_cache_both_generations(self): dc = DomainCache(2, self.conn, 'domain_metadata')", "2} # eviction should happen dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] =", "# -*- coding: utf-8 -*- from frontera.contrib.backends.hbase.domaincache import DomainCache from happybase import Connection", "dc.setdefault('d5', {'domain': 6}) == {'domain': 6} dc.flush() assert dc.setdefault('d3', {}) == {'domain': [3,", "{'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert dc.get('d1', {}) == {'domain':", "{'domain': [3, 2, 1]} dc['d4'] = {'domain': 4} assert dc['d1'] == {'domain': 1}", "dc = DomainCache(2, self.conn, 'domain_metadata', batch_size=3) dc['d1'] = {'domain': 1} dc['d2'] = {'domain':", "2, 1]} dc['d4'] = {'domain': 4} assert dc.get('d1', {}) == {'domain': 1} assert", "del dc['d4'] # hbase def test_contains(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] =", "assert dc.get('d3', {}) == {'domain': [3, 2, 1]} def test_domain_cache_setdefault(self): dc = DomainCache(2,", "# second gen del dc['d3'] # first gen dc.flush() del dc['d4'] # hbase", "{'domain': 1} dc['d2'] = {'domain': 2} dc['d3'] = {'domain': [3, 2, 1]} dc['d4']", "self.conn, 'domain_metadata', batch_size=3) dc['d1'] = {'domain': 1} dc['d2'] = {'domain': 2} dc['d3'] =", "2, 1]} dc['d4'] = {'domain': 4} assert 'd1' in dc # second gen", "1} dc['d2'] = {'domain': 2} dc['d3'] = {'domain': [3, 2, 1]} dc['d4'] =", "import Connection import logging import unittest class TestDomainCache(unittest.TestCase): def setUp(self): logging.basicConfig(level=logging.DEBUG) self.conn =", "{}) == {'domain': 1} assert dc.get('d3', {}) == {'domain': [3, 2, 1]} def", "import unittest class TestDomainCache(unittest.TestCase): def setUp(self): logging.basicConfig(level=logging.DEBUG) self.conn = Connection(host=\"hbase-docker\") if b'domain_metadata' not", "t = self.conn.table('domain_metadata') t.delete('d1') 
t.delete('d2') t.delete('d3') t.delete('d4') def test_domain_cache_both_generations(self): dc = DomainCache(2, self.conn,", "1} assert dc.get('d3', {}) == {'domain': [3, 2, 1]} def test_domain_cache_setdefault(self): dc =", "t.delete('d4') def test_domain_cache_both_generations(self): dc = DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2']", "2, 1]} dc['d4'] = {'domain': 4} del dc['d1'] # second gen del dc['d3']", "in self.conn.tables(): self.conn.create_table('domain_metadata', { 'm': {'max_versions': 1, 'block_cache_enabled': 1,} }) t = self.conn.table('domain_metadata')", "= DomainCache(2, self.conn, 'domain_metadata') dc['d1'] = {'domain': 1} dc['d2'] = {'domain': 2} #", "in dc # first gen dc.flush() assert 'd4' in dc def test_pop(self): dc", "assert dc.setdefault('d1', {}) == {'domain': 2} def test_empty_key(self): dc = DomainCache(2, self.conn, 'domain_metadata')", "dc['d4'] = {'domain': 4} del dc['d1'] # second gen del dc['d3'] # first" ]
[ "defaults_from_configfile(ctx, param, filename): def dict_from_yaml(path): with open(path, 'r') as yaml_file: data = yaml.load(yaml_file,", "\"\\n\") return value def int_as_string(ctx, param, value): if type(value) == int: return str(value)", "return value def resolve_linked_names_to_uuids(ctx, param, value): option_name = param.opts[0].replace(\"--\", \"\") resolve_map = ctx.obj.uuid_resolver_map[option_name]", "resolve_linked_names_to_uuids(ctx, param, value): option_name = param.opts[0].replace(\"--\", \"\") resolve_map = ctx.obj.uuid_resolver_map[option_name] if value and", "\"\"\" def defaults_from_configfile(ctx, param, filename): def dict_from_yaml(path): with open(path, 'r') as yaml_file: data", "tuple_to_csv(ctx, param, value): if param.multiple and not value: return None if type(value) ==", "== int: return str(value) return value def resolve_linked_names_to_uuids(ctx, param, value): option_name = param.opts[0].replace(\"--\",", "= CliOutputFormatFactory(format_name) return factory.get_class() def bool_as_string(ctx, param, value): if type(value) == bool: return", "Loader=yaml.SafeLoader) return data options = dict_from_yaml(os.path.expanduser(filename)) ctx.default_map = options def expand_path(ctx, param, filename):", "import CommandFacade from opnsense_cli.factories.cli_output_format import CliOutputFormatFactory from opnsense_cli.formats.base import Format \"\"\" Click callback", "options = dict_from_yaml(os.path.expanduser(filename)) ctx.default_map = options def expand_path(ctx, param, filename): return os.path.expanduser(filename) def", "def formatter_from_formatter_name(ctx, param, format_name) -> Format: factory = CliOutputFormatFactory(format_name) return factory.get_class() def bool_as_string(ctx,", "return CliOutputFormatFactory._keymap.keys() def formatter_from_formatter_name(ctx, param, format_name) -> Format: factory = CliOutputFormatFactory(format_name) return factory.get_class()", "from opnsense_cli.formats.base import Format \"\"\" 
Click callback methods See: https://click.palletsprojects.com/en/8.0.x/advanced/#parameter-modifications \"\"\" def defaults_from_configfile(ctx,", "param, value): if type(value) == int: return str(value) return value def resolve_linked_names_to_uuids(ctx, param,", "value def tuple_to_csv(ctx, param, value): if param.multiple and not value: return None if", "if type(value) == tuple: return \",\".join(value) return value def comma_to_newline(ctx, param, value): if", "value): if type(value) == str and \",\" in value: return value.replace(\",\", \"\\n\") return", "available_formats(): return CliOutputFormatFactory._keymap.keys() def formatter_from_formatter_name(ctx, param, format_name) -> Format: factory = CliOutputFormatFactory(format_name) return", "return factory.get_class() def bool_as_string(ctx, param, value): if type(value) == bool: return str(int(value)) return", "\"\") resolve_map = ctx.obj.uuid_resolver_map[option_name] if value and isinstance(ctx.obj, CommandFacade): return ctx.obj.resolve_linked_uuids(resolve_map, value) return", "if type(value) == bool: return str(int(value)) return value def tuple_to_csv(ctx, param, value): if", "def comma_to_newline(ctx, param, value): if type(value) == str and \",\" in value: return", "type(value) == int: return str(value) return value def resolve_linked_names_to_uuids(ctx, param, value): option_name =", "and not value: return None if type(value) == tuple: return \",\".join(value) return value", "def expand_path(ctx, param, filename): return os.path.expanduser(filename) def available_formats(): return CliOutputFormatFactory._keymap.keys() def formatter_from_formatter_name(ctx, param,", "return value def tuple_to_csv(ctx, param, value): if param.multiple and not value: return None", "Format \"\"\" Click callback methods See: https://click.palletsprojects.com/en/8.0.x/advanced/#parameter-modifications \"\"\" def defaults_from_configfile(ctx, param, filename): def", "\",\".join(value) return value def 
comma_to_newline(ctx, param, value): if type(value) == str and \",\"", "comma_to_newline(ctx, param, value): if type(value) == str and \",\" in value: return value.replace(\",\",", "yaml.load(yaml_file, Loader=yaml.SafeLoader) return data options = dict_from_yaml(os.path.expanduser(filename)) ctx.default_map = options def expand_path(ctx, param,", "int_as_string(ctx, param, value): if type(value) == int: return str(value) return value def resolve_linked_names_to_uuids(ctx,", "param, value): option_name = param.opts[0].replace(\"--\", \"\") resolve_map = ctx.obj.uuid_resolver_map[option_name] if value and isinstance(ctx.obj,", "factory.get_class() def bool_as_string(ctx, param, value): if type(value) == bool: return str(int(value)) return value", "= options def expand_path(ctx, param, filename): return os.path.expanduser(filename) def available_formats(): return CliOutputFormatFactory._keymap.keys() def", "== str and \",\" in value: return value.replace(\",\", \"\\n\") return value def int_as_string(ctx,", "type(value) == tuple: return \",\".join(value) return value def comma_to_newline(ctx, param, value): if type(value)", "return os.path.expanduser(filename) def available_formats(): return CliOutputFormatFactory._keymap.keys() def formatter_from_formatter_name(ctx, param, format_name) -> Format: factory", "not value: return None if type(value) == tuple: return \",\".join(value) return value def", "\",\" in value: return value.replace(\",\", \"\\n\") return value def int_as_string(ctx, param, value): if", "= yaml.load(yaml_file, Loader=yaml.SafeLoader) return data options = dict_from_yaml(os.path.expanduser(filename)) ctx.default_map = options def expand_path(ctx,", "Format: factory = CliOutputFormatFactory(format_name) return factory.get_class() def bool_as_string(ctx, param, value): if type(value) ==", "param, value): if type(value) == bool: return str(int(value)) return value def tuple_to_csv(ctx, param,", "CliOutputFormatFactory._keymap.keys() def 
formatter_from_formatter_name(ctx, param, format_name) -> Format: factory = CliOutputFormatFactory(format_name) return factory.get_class() def", "open(path, 'r') as yaml_file: data = yaml.load(yaml_file, Loader=yaml.SafeLoader) return data options = dict_from_yaml(os.path.expanduser(filename))", "CliOutputFormatFactory from opnsense_cli.formats.base import Format \"\"\" Click callback methods See: https://click.palletsprojects.com/en/8.0.x/advanced/#parameter-modifications \"\"\" def", "if type(value) == str and \",\" in value: return value.replace(\",\", \"\\n\") return value", "param, value): if type(value) == str and \",\" in value: return value.replace(\",\", \"\\n\")", "param.opts[0].replace(\"--\", \"\") resolve_map = ctx.obj.uuid_resolver_map[option_name] if value and isinstance(ctx.obj, CommandFacade): return ctx.obj.resolve_linked_uuids(resolve_map, value)", "resolve_map = ctx.obj.uuid_resolver_map[option_name] if value and isinstance(ctx.obj, CommandFacade): return ctx.obj.resolve_linked_uuids(resolve_map, value) return value", "== bool: return str(int(value)) return value def tuple_to_csv(ctx, param, value): if param.multiple and", "= param.opts[0].replace(\"--\", \"\") resolve_map = ctx.obj.uuid_resolver_map[option_name] if value and isinstance(ctx.obj, CommandFacade): return ctx.obj.resolve_linked_uuids(resolve_map,", "opnsense_cli.formats.base import Format \"\"\" Click callback methods See: https://click.palletsprojects.com/en/8.0.x/advanced/#parameter-modifications \"\"\" def defaults_from_configfile(ctx, param,", "if type(value) == int: return str(value) return value def resolve_linked_names_to_uuids(ctx, param, value): option_name", "def available_formats(): return CliOutputFormatFactory._keymap.keys() def formatter_from_formatter_name(ctx, param, format_name) -> Format: factory = CliOutputFormatFactory(format_name)", "value: return None if type(value) == tuple: return \",\".join(value) return value def comma_to_newline(ctx,", "type(value) == str 
and \",\" in value: return value.replace(\",\", \"\\n\") return value def", "-> Format: factory = CliOutputFormatFactory(format_name) return factory.get_class() def bool_as_string(ctx, param, value): if type(value)", "value): if param.multiple and not value: return None if type(value) == tuple: return", "def tuple_to_csv(ctx, param, value): if param.multiple and not value: return None if type(value)", "as yaml_file: data = yaml.load(yaml_file, Loader=yaml.SafeLoader) return data options = dict_from_yaml(os.path.expanduser(filename)) ctx.default_map =", "value): option_name = param.opts[0].replace(\"--\", \"\") resolve_map = ctx.obj.uuid_resolver_map[option_name] if value and isinstance(ctx.obj, CommandFacade):", "https://click.palletsprojects.com/en/8.0.x/advanced/#parameter-modifications \"\"\" def defaults_from_configfile(ctx, param, filename): def dict_from_yaml(path): with open(path, 'r') as yaml_file:", "format_name) -> Format: factory = CliOutputFormatFactory(format_name) return factory.get_class() def bool_as_string(ctx, param, value): if", "= dict_from_yaml(os.path.expanduser(filename)) ctx.default_map = options def expand_path(ctx, param, filename): return os.path.expanduser(filename) def available_formats():", "import os from opnsense_cli.facades.commands.base import CommandFacade from opnsense_cli.factories.cli_output_format import CliOutputFormatFactory from opnsense_cli.formats.base import", "from opnsense_cli.factories.cli_output_format import CliOutputFormatFactory from opnsense_cli.formats.base import Format \"\"\" Click callback methods See:", "options def expand_path(ctx, param, filename): return os.path.expanduser(filename) def available_formats(): return CliOutputFormatFactory._keymap.keys() def formatter_from_formatter_name(ctx,", "methods See: https://click.palletsprojects.com/en/8.0.x/advanced/#parameter-modifications \"\"\" def defaults_from_configfile(ctx, param, filename): def dict_from_yaml(path): with open(path, 'r')", "None if 
type(value) == tuple: return \",\".join(value) return value def comma_to_newline(ctx, param, value):", "bool_as_string(ctx, param, value): if type(value) == bool: return str(int(value)) return value def tuple_to_csv(ctx,", "type(value) == bool: return str(int(value)) return value def tuple_to_csv(ctx, param, value): if param.multiple", "bool: return str(int(value)) return value def tuple_to_csv(ctx, param, value): if param.multiple and not", "data options = dict_from_yaml(os.path.expanduser(filename)) ctx.default_map = options def expand_path(ctx, param, filename): return os.path.expanduser(filename)", "with open(path, 'r') as yaml_file: data = yaml.load(yaml_file, Loader=yaml.SafeLoader) return data options =", "value): if type(value) == bool: return str(int(value)) return value def tuple_to_csv(ctx, param, value):", "in value: return value.replace(\",\", \"\\n\") return value def int_as_string(ctx, param, value): if type(value)", "int: return str(value) return value def resolve_linked_names_to_uuids(ctx, param, value): option_name = param.opts[0].replace(\"--\", \"\")", "CommandFacade from opnsense_cli.factories.cli_output_format import CliOutputFormatFactory from opnsense_cli.formats.base import Format \"\"\" Click callback methods", "param, filename): def dict_from_yaml(path): with open(path, 'r') as yaml_file: data = yaml.load(yaml_file, Loader=yaml.SafeLoader)", "param, filename): return os.path.expanduser(filename) def available_formats(): return CliOutputFormatFactory._keymap.keys() def formatter_from_formatter_name(ctx, param, format_name) ->", "callback methods See: https://click.palletsprojects.com/en/8.0.x/advanced/#parameter-modifications \"\"\" def defaults_from_configfile(ctx, param, filename): def dict_from_yaml(path): with open(path,", "option_name = param.opts[0].replace(\"--\", \"\") resolve_map = ctx.obj.uuid_resolver_map[option_name] if value and isinstance(ctx.obj, CommandFacade): return", "return None if type(value) == tuple: return 
\",\".join(value) return value def comma_to_newline(ctx, param,", "return value.replace(\",\", \"\\n\") return value def int_as_string(ctx, param, value): if type(value) == int:", "filename): return os.path.expanduser(filename) def available_formats(): return CliOutputFormatFactory._keymap.keys() def formatter_from_formatter_name(ctx, param, format_name) -> Format:", "def resolve_linked_names_to_uuids(ctx, param, value): option_name = param.opts[0].replace(\"--\", \"\") resolve_map = ctx.obj.uuid_resolver_map[option_name] if value", "See: https://click.palletsprojects.com/en/8.0.x/advanced/#parameter-modifications \"\"\" def defaults_from_configfile(ctx, param, filename): def dict_from_yaml(path): with open(path, 'r') as", "def int_as_string(ctx, param, value): if type(value) == int: return str(value) return value def", "import CliOutputFormatFactory from opnsense_cli.formats.base import Format \"\"\" Click callback methods See: https://click.palletsprojects.com/en/8.0.x/advanced/#parameter-modifications \"\"\"", "filename): def dict_from_yaml(path): with open(path, 'r') as yaml_file: data = yaml.load(yaml_file, Loader=yaml.SafeLoader) return", "import Format \"\"\" Click callback methods See: https://click.palletsprojects.com/en/8.0.x/advanced/#parameter-modifications \"\"\" def defaults_from_configfile(ctx, param, filename):", "def defaults_from_configfile(ctx, param, filename): def dict_from_yaml(path): with open(path, 'r') as yaml_file: data =", "def bool_as_string(ctx, param, value): if type(value) == bool: return str(int(value)) return value def", "from opnsense_cli.facades.commands.base import CommandFacade from opnsense_cli.factories.cli_output_format import CliOutputFormatFactory from opnsense_cli.formats.base import Format \"\"\"", "dict_from_yaml(path): with open(path, 'r') as yaml_file: data = yaml.load(yaml_file, Loader=yaml.SafeLoader) return data options", "param, format_name) -> Format: factory = CliOutputFormatFactory(format_name) return 
factory.get_class() def bool_as_string(ctx, param, value):", "os.path.expanduser(filename) def available_formats(): return CliOutputFormatFactory._keymap.keys() def formatter_from_formatter_name(ctx, param, format_name) -> Format: factory =", "param.multiple and not value: return None if type(value) == tuple: return \",\".join(value) return", "return str(value) return value def resolve_linked_names_to_uuids(ctx, param, value): option_name = param.opts[0].replace(\"--\", \"\") resolve_map", "opnsense_cli.facades.commands.base import CommandFacade from opnsense_cli.factories.cli_output_format import CliOutputFormatFactory from opnsense_cli.formats.base import Format \"\"\" Click", "factory = CliOutputFormatFactory(format_name) return factory.get_class() def bool_as_string(ctx, param, value): if type(value) == bool:", "return \",\".join(value) return value def comma_to_newline(ctx, param, value): if type(value) == str and", "value def int_as_string(ctx, param, value): if type(value) == int: return str(value) return value", "tuple: return \",\".join(value) return value def comma_to_newline(ctx, param, value): if type(value) == str", "str(value) return value def resolve_linked_names_to_uuids(ctx, param, value): option_name = param.opts[0].replace(\"--\", \"\") resolve_map =", "import yaml import os from opnsense_cli.facades.commands.base import CommandFacade from opnsense_cli.factories.cli_output_format import CliOutputFormatFactory from", "value): if type(value) == int: return str(value) return value def resolve_linked_names_to_uuids(ctx, param, value):", "value: return value.replace(\",\", \"\\n\") return value def int_as_string(ctx, param, value): if type(value) ==", "str(int(value)) return value def tuple_to_csv(ctx, param, value): if param.multiple and not value: return", "str and \",\" in value: return value.replace(\",\", \"\\n\") return value def int_as_string(ctx, param,", "ctx.default_map = options def expand_path(ctx, param, filename): return 
os.path.expanduser(filename) def available_formats(): return CliOutputFormatFactory._keymap.keys()", "return value def comma_to_newline(ctx, param, value): if type(value) == str and \",\" in", "formatter_from_formatter_name(ctx, param, format_name) -> Format: factory = CliOutputFormatFactory(format_name) return factory.get_class() def bool_as_string(ctx, param,", "value.replace(\",\", \"\\n\") return value def int_as_string(ctx, param, value): if type(value) == int: return", "value def comma_to_newline(ctx, param, value): if type(value) == str and \",\" in value:", "\"\"\" Click callback methods See: https://click.palletsprojects.com/en/8.0.x/advanced/#parameter-modifications \"\"\" def defaults_from_configfile(ctx, param, filename): def dict_from_yaml(path):", "Click callback methods See: https://click.palletsprojects.com/en/8.0.x/advanced/#parameter-modifications \"\"\" def defaults_from_configfile(ctx, param, filename): def dict_from_yaml(path): with", "yaml_file: data = yaml.load(yaml_file, Loader=yaml.SafeLoader) return data options = dict_from_yaml(os.path.expanduser(filename)) ctx.default_map = options", "opnsense_cli.factories.cli_output_format import CliOutputFormatFactory from opnsense_cli.formats.base import Format \"\"\" Click callback methods See: https://click.palletsprojects.com/en/8.0.x/advanced/#parameter-modifications", "return str(int(value)) return value def tuple_to_csv(ctx, param, value): if param.multiple and not value:", "value def resolve_linked_names_to_uuids(ctx, param, value): option_name = param.opts[0].replace(\"--\", \"\") resolve_map = ctx.obj.uuid_resolver_map[option_name] if", "param, value): if param.multiple and not value: return None if type(value) == tuple:", "if param.multiple and not value: return None if type(value) == tuple: return \",\".join(value)", "== tuple: return \",\".join(value) return value def comma_to_newline(ctx, param, value): if type(value) ==", "expand_path(ctx, param, filename): return 
os.path.expanduser(filename) def available_formats(): return CliOutputFormatFactory._keymap.keys() def formatter_from_formatter_name(ctx, param, format_name)", "return data options = dict_from_yaml(os.path.expanduser(filename)) ctx.default_map = options def expand_path(ctx, param, filename): return", "and \",\" in value: return value.replace(\",\", \"\\n\") return value def int_as_string(ctx, param, value):", "'r') as yaml_file: data = yaml.load(yaml_file, Loader=yaml.SafeLoader) return data options = dict_from_yaml(os.path.expanduser(filename)) ctx.default_map", "data = yaml.load(yaml_file, Loader=yaml.SafeLoader) return data options = dict_from_yaml(os.path.expanduser(filename)) ctx.default_map = options def", "yaml import os from opnsense_cli.facades.commands.base import CommandFacade from opnsense_cli.factories.cli_output_format import CliOutputFormatFactory from opnsense_cli.formats.base", "os from opnsense_cli.facades.commands.base import CommandFacade from opnsense_cli.factories.cli_output_format import CliOutputFormatFactory from opnsense_cli.formats.base import Format", "return value def int_as_string(ctx, param, value): if type(value) == int: return str(value) return", "CliOutputFormatFactory(format_name) return factory.get_class() def bool_as_string(ctx, param, value): if type(value) == bool: return str(int(value))", "def dict_from_yaml(path): with open(path, 'r') as yaml_file: data = yaml.load(yaml_file, Loader=yaml.SafeLoader) return data", "dict_from_yaml(os.path.expanduser(filename)) ctx.default_map = options def expand_path(ctx, param, filename): return os.path.expanduser(filename) def available_formats(): return" ]
[ "conn.cursor() as cur: cur.execute(f\"DELETE FROM {self.table_name};\") conn.commit() def ran_successfully(self, identifier): \"\"\" Returns true", "[(1, 2, 3), (4, 5, 6), (7, 8, 9)]) \"\"\" raise NotImplementedError def", "id, record FROM {self.table_name} \" \\ \"WHERE record<>'success';\" with psycopg2.connect(self.connection_info) as conn: with", "incorrect. Should be' 'in for format of \"dbname=<db_name> user=<user_name>' 'host=<host_name> password=<password>\"') conn.close() def", "the record with this identifier is successful :param identifier: (str) Identifier of the", "\"\"\" query = f\"SELECT * FROM {self.table_name}\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor()", "varchar({self._max_record_length}) NOT NULL\" f\");\") conn.commit() cur.execute(f\"CREATE TABLE IF NOT EXISTS {self.error_table_name} (\" f\"", "list) Returns a list of the identifiers of all successful runs \"\"\" query", "table \"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"DELETE FROM {self.table_name};\")", "conn.commit() def ran_successfully(self, identifier): \"\"\" Returns true / false on whether the record", "conn.cursor() as cur: cur.execute(query) return cur.fetchone()[0] def batch_insert(self, records): \"\"\"Batch insert records. >>>", "on if job ran successfully \"\"\" query = f\"SELECT record FROM {self.table_name} \"", "\"\"\" Creates tables if they don't already exist. 
\"\"\" # Create main table", "of the job record :return: (bool) Boolean on if job ran successfully \"\"\"", "ran successfully \"\"\" query = f\"SELECT record FROM {self.table_name} \" \\ f\"WHERE id='{identifier}';\"", "conn.commit() def _delete_tables(self): \"\"\" Drops the database tables \"\"\" with psycopg2.connect(self.connection_info) as conn:", "f\"VALUES ('{identifier}', '{error_type}');\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) conn.commit()", ">>> execute_values(cur, ... \"INSERT INTO test (id, v1, v2) VALUES %s\", ... [(1,", "by the given identifier from the database :param identifier: (str) Identifier of the", "f\" record varchar({self._max_record_length}) NOT NULL\" f\");\") conn.commit() def _delete_tables(self): \"\"\" Drops the database", "return False def count_records(self): \"\"\" :return: (int) Number of records in the table", "as cur: cur.execute(query) return [name[0] for name in cur] def get_failed_runs(self): \"\"\" :return:", "cur: cur.execute(query) conn.commit() def insert_failure(self, identifier, error_type='failure'): \"\"\" Inserts an entry into the", "of successfull records in the table \"\"\" query = f\"SELECT COUNT(*) FROM {self.table_name}", "in them \"\"\" query = f\"SELECT id, record FROM {self.table_name} \" \\ \"WHERE", "Number of failed records in the table \"\"\" query = f\"SELECT COUNT(*) FROM", "\" f\" record varchar({self._max_record_length}) NOT NULL\" f\");\") conn.commit() cur.execute(f\"CREATE TABLE IF NOT EXISTS", "f\"WHERE id='{identifier}';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) record =", "Dictionary of error types mapped to lists of job identifiers which record in", "6), (7, 8, 9)]) \"\"\" raise NotImplementedError def insert_success(self, identifier): \"\"\" Inserts an", "the job with the identifier parsed and returns it :param identifier: (str) Identifier", "IF NOT EXISTS {self.table_name} 
(\" f\" id varchar({self._max_id_length}) PRIMARY KEY, \" f\" record", "{self.table_name};\") return cur.fetchone()[0] def count_successes(self): \"\"\" :return: (int) Number of successfull records in", "as cur: cur.execute(query) record = cur.fetchone() if record is not None: return record[0]", "varchar({self._max_id_length}) PRIMARY KEY, \" f\" record varchar({self._max_record_length}) NOT NULL\" f\");\") conn.commit() cur.execute(f\"CREATE TABLE", "Number of successfull records in the table \"\"\" query = f\"SELECT COUNT(*) FROM", "string name for the errors db table. \"\"\" self.connection_info = os.environ.get(\"CEDA_INTAKE_DB_SETTINGS\") if not", "conn.commit() def insert_failure(self, identifier, error_type='failure'): \"\"\" Inserts an entry into the table with", "Optional string name for the errors db table. \"\"\" self.connection_info = os.environ.get(\"CEDA_INTAKE_DB_SETTINGS\") if", "return cur.fetchone()[0] def batch_insert(self, records): \"\"\"Batch insert records. >>> execute_values(cur, ... \"INSERT INTO", "FROM {self.table_name} \" \\ f\"WHERE id='{identifier}';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as", "record :return: (str) Record of job \"\"\" query = f\"SELECT record FROM {self.table_name}", "_test_connection(self): try: conn = psycopg2.connect(self.connection_info) except psycopg2.Error as err: print(err) raise ValueError('CEDA_INTAKE_DB_SETTINGS string", "as conn: with conn.cursor() as cur: cur.execute(query) record = cur.fetchone() if record is", "for the errors db table. 
\"\"\" self.connection_info = os.environ.get(\"CEDA_INTAKE_DB_SETTINGS\") if not self.connection_info: raise", "the table \"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"SELECT COUNT(*)", "\"WHERE record<>'success';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) return cur.fetchone()[0]", "conn.cursor() as cur: cur.execute(query) conn.commit() def insert_failure(self, identifier, error_type='failure'): \"\"\" Inserts an entry", "'host=<host_name> password=<password>\"') self._test_connection() self.table_name = table_name self.error_table_name = error_table_name self._create_tables() def _test_connection(self): try:", "cur.fetchone()[0] return None def get_all_records(self): \"\"\" :return: (dict) Dictionary of all job identifiers", "NULL\" f\");\") conn.commit() cur.execute(f\"CREATE TABLE IF NOT EXISTS {self.error_table_name} (\" f\" id varchar({self._max_id_length})", "with conn.cursor() as cur: cur.execute(f\"CREATE TABLE IF NOT EXISTS {self.table_name} (\" f\" id", "(name, record) in cur: record_dict[name] = record return record_dict def get_successful_runs(self): \"\"\" :return:", "insert_failure(self, identifier, error_type='failure'): \"\"\" Inserts an entry into the table with a given", "runs \"\"\" query = f\"SELECT id FROM {self.table_name} \" \\ \"WHERE record='success';\" with", "identifier and erroneous record :param identifier: (str) Identifier of the job :param error_type:", "exist. 
\"\"\" # Create main table with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as", "to lists of job identifiers which record in them \"\"\" query = f\"SELECT", "{self.table_name} (\" f\" id varchar({self._max_id_length}) PRIMARY KEY, \" f\" record varchar({self._max_record_length}) NOT NULL\"", "(dict) Dictionary of all job identifiers mapped to their respective records \"\"\" query", "Inserts an entry into the table with a given identifier and erroneous record", "cur: cur.execute(query) return [name[0] for name in cur] def get_failed_runs(self): \"\"\" :return: (dict)", "\" \\ f\"VALUES ('{identifier}', 'success');\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur:", "{} for (name, record) in cur: failures.setdefault(record, []) failures[record].append(name) return failures def delete_record(self,", "cur.fetchone()[0] def count_successes(self): \"\"\" :return: (int) Number of successfull records in the table", "cur: record_dict[name] = record return record_dict def get_successful_runs(self): \"\"\" :return: (str list) Returns", "erroneous record :param identifier: (str) Identifier of the job :param error_type: (str) Record", "record) in cur: record_dict[name] = record return record_dict def get_successful_runs(self): \"\"\" :return: (str", "the identifiers of all successful runs \"\"\" query = f\"SELECT id FROM {self.table_name}", "(dict) Dictionary of error types mapped to lists of job identifiers which record", "\"\"\" :return: (int) Number of successfull records in the table \"\"\" query =", "= cur.fetchone() if record is not None: return record[0] == 'success' return False", "of the job \"\"\" if self.get_record(identifier): self.delete_record(identifier) error_type = error_type[:self._max_record_length] query = f\"INSERT", "cur.fetchone()[0] def batch_insert(self, records): \"\"\"Batch insert records. >>> execute_values(cur, ... 
\"INSERT INTO test", "with conn.cursor() as cur: cur.execute(f\"SELECT COUNT(*) FROM {self.table_name};\") return cur.fetchone()[0] def count_successes(self): \"\"\"", "= f\"DELETE FROM {self.table_name} \" \\ f\"WHERE id='{identifier}';\" with psycopg2.connect(self.connection_info) as conn: with", "= 255 _max_record_length = 255 def __init__(self, table_name=\"intake_records\", error_table_name=\"scan_errors\"): \"\"\" :param table_name: (str)", "already exist. \"\"\" # Create main table with psycopg2.connect(self.connection_info) as conn: with conn.cursor()", "identifiers of all successful runs \"\"\" query = f\"SELECT id FROM {self.table_name} \"", "cur.execute(f\"DELETE FROM {self.table_name};\") conn.commit() def ran_successfully(self, identifier): \"\"\" Returns true / false on", "the errors db table. \"\"\" self.connection_info = os.environ.get(\"CEDA_INTAKE_DB_SETTINGS\") if not self.connection_info: raise KeyError('Please", "with conn.cursor() as cur: cur.execute(query) return cur.fetchone()[0] def batch_insert(self, records): \"\"\"Batch insert records.", "the given identifier from the database :param identifier: (str) Identifier of the job", "_create_tables(self): \"\"\" Creates tables if they don't already exist. 
\"\"\" # Create main", "= {} for (name, record) in cur: record_dict[name] = record return record_dict def", "query = f\"SELECT id, record FROM {self.table_name} \" \\ \"WHERE record<>'success';\" with psycopg2.connect(self.connection_info)", "TABLE {self.table_name};\") conn.commit() cur.execute(f\"DROP TABLE {self.error_table_name};\") conn.commit() def get_record(self, identifier): \"\"\" Selects the", "parsed and returns it :param identifier: (str) Identifier of the job record :return:", "Deletes all entries from the table \"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor()", "job :param error_type: (str) Record of the job \"\"\" if self.get_record(identifier): self.delete_record(identifier) error_type", "of \"dbname=<db_name> user=<user_name>' 'host=<host_name> password=<password>\"') self._test_connection() self.table_name = table_name self.error_table_name = error_table_name self._create_tables()", "of the job \"\"\" if self.get_record(identifier): self.delete_record(identifier) query = f\"INSERT INTO {self.table_name} \"", "self._test_connection() self.table_name = table_name self.error_table_name = error_table_name self._create_tables() def _test_connection(self): try: conn =", "\\ f\"WHERE id='{identifier}';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) conn.commit()", "job \"\"\" query = f\"SELECT record FROM {self.table_name} \" \\ f\"WHERE id='{identifier}';\" with", "f\" record varchar({self._max_record_length}) NOT NULL\" f\");\") conn.commit() cur.execute(f\"CREATE TABLE IF NOT EXISTS {self.error_table_name}", "get_record(self, identifier): \"\"\" Selects the record of the job with the identifier parsed", "with conn.cursor() as cur: cur.execute(f\"DROP TABLE {self.table_name};\") conn.commit() cur.execute(f\"DROP TABLE {self.error_table_name};\") conn.commit() def", "os.environ.get(\"CEDA_INTAKE_DB_SETTINGS\") if not self.connection_info: raise KeyError('Please create 
environment variable CEDA_INTAKE_DB_SETTINGS' 'in for format", "error_type='failure'): \"\"\" Inserts an entry into the table with a given identifier and", ":return: (dict) Dictionary of all job identifiers mapped to their respective records \"\"\"", "with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) return [name[0] for name", "'success');\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) conn.commit() def insert_failure(self,", "job \"\"\" if self.get_record(identifier): self.delete_record(identifier) error_type = error_type[:self._max_record_length] query = f\"INSERT INTO {self.table_name}", "self.connection_info = os.environ.get(\"CEDA_INTAKE_DB_SETTINGS\") if not self.connection_info: raise KeyError('Please create environment variable CEDA_INTAKE_DB_SETTINGS' 'in", "\" \\ f\"WHERE id='{identifier}';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query)", "if record is not None: return record[0] == 'success' return False def count_records(self):", "(str) Optional string name of the main db table. 
:param error_table_name: (str) Optional", "(7, 8, 9)]) \"\"\" raise NotImplementedError def insert_success(self, identifier): \"\"\" Inserts an entry", "and erroneous record :param identifier: (str) Identifier of the job :param error_type: (str)", ":param error_type: (str) Record of the job \"\"\" if self.get_record(identifier): self.delete_record(identifier) error_type =", "{self.table_name} \" \\ f\"VALUES ('{identifier}', 'success');\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as", "types mapped to lists of job identifiers which record in them \"\"\" query", "the table \"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"DELETE FROM", "get_successful_runs(self): \"\"\" :return: (str list) Returns a list of the identifiers of all", "the record 'success' :param identifier: (str) Identifier of the job \"\"\" if self.get_record(identifier):", "NOT NULL\" f\");\") conn.commit() def _delete_tables(self): \"\"\" Drops the database tables \"\"\" with", "as cur: cur.execute(query) conn.commit() def insert_failure(self, identifier, error_type='failure'): \"\"\" Inserts an entry into", "conn: with conn.cursor() as cur: cur.execute(f\"DELETE FROM {self.table_name};\") conn.commit() def ran_successfully(self, identifier): \"\"\"", "Number of records in the table \"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor()", "successfull records in the table \"\"\" query = f\"SELECT COUNT(*) FROM {self.table_name} \"", "def _create_tables(self): \"\"\" Creates tables if they don't already exist. 
\"\"\" # Create", "('{identifier}', 'success');\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) conn.commit() def", "a given identifier and the record 'success' :param identifier: (str) Identifier of the", "psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) return [name[0] for name in", "the job record :return: (str) Record of job \"\"\" query = f\"SELECT record", "= f\"SELECT COUNT(*) FROM {self.table_name} \" \\ \"WHERE record<>'success';\" with psycopg2.connect(self.connection_info) as conn:", "record_dict def get_successful_runs(self): \"\"\" :return: (str list) Returns a list of the identifiers", "= f\"SELECT id, record FROM {self.table_name} \" \\ \"WHERE record<>'success';\" with psycopg2.connect(self.connection_info) as", "record_dict = {} for (name, record) in cur: record_dict[name] = record return record_dict", "cur: cur.execute(query) return cur.fetchone()[0] def batch_insert(self, records): \"\"\"Batch insert records. 
>>> execute_values(cur, ...", "(int) Number of successfull records in the table \"\"\" query = f\"SELECT COUNT(*)", "psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) record = cur.fetchone() if record", "def count_failures(self): \"\"\" :return: (int) Number of failed records in the table \"\"\"", "as conn: with conn.cursor() as cur: cur.execute(query) return cur.fetchone()[0] def batch_insert(self, records): \"\"\"Batch", "of failed records in the table \"\"\" query = f\"SELECT COUNT(*) FROM {self.table_name}", "cur: cur.execute(f\"CREATE TABLE IF NOT EXISTS {self.table_name} (\" f\" id varchar({self._max_id_length}) PRIMARY KEY,", "FROM {self.table_name};\") conn.commit() def ran_successfully(self, identifier): \"\"\" Returns true / false on whether", "cur.fetchone() if record is not None: return record[0] == 'success' return False def", "conn: with conn.cursor() as cur: cur.execute(query) return cur.fetchone()[0] def batch_insert(self, records): \"\"\"Batch insert", "errors db table. 
\"\"\" self.connection_info = os.environ.get(\"CEDA_INTAKE_DB_SETTINGS\") if not self.connection_info: raise KeyError('Please create", "\"\"\" :return: (int) Number of records in the table \"\"\" with psycopg2.connect(self.connection_info) as", "Returns true / false on whether the record with this identifier is successful", "return failures def delete_record(self, identifier): \"\"\" Deletes entry specified by the given identifier", "conn.cursor() as cur: cur.execute(query) conn.commit() def delete_all_records(self): \"\"\" Deletes all entries from the", "identifier: (str) Identifier of the job :param error_type: (str) Record of the job", "count_successes(self): \"\"\" :return: (int) Number of successfull records in the table \"\"\" query", "os class DBHandler: _max_id_length = 255 _max_record_length = 255 def __init__(self, table_name=\"intake_records\", error_table_name=\"scan_errors\"):", "conn.cursor() as cur: cur.execute(query) record_dict = {} for (name, record) in cur: record_dict[name]", "= record return record_dict def get_successful_runs(self): \"\"\" :return: (str list) Returns a list", "def delete_record(self, identifier): \"\"\" Deletes entry specified by the given identifier from the", "entry specified by the given identifier from the database :param identifier: (str) Identifier", "except psycopg2.Error as err: print(err) raise ValueError('CEDA_INTAKE_DB_SETTINGS string is incorrect. Should be' 'in", "\"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"DROP TABLE {self.table_name};\") conn.commit()", "\"\"\" :return: (dict) Dictionary of error types mapped to lists of job identifiers", "entries from the table \"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur:", "is incorrect. 
Should be' 'in for format of \"dbname=<db_name> user=<user_name>' 'host=<host_name> password=<password>\"') conn.close()", "identifier is successful :param identifier: (str) Identifier of the job record :return: (bool)", "\"\"\" :return: (int) Number of failed records in the table \"\"\" query =", "cur: failures.setdefault(record, []) failures[record].append(name) return failures def delete_record(self, identifier): \"\"\" Deletes entry specified", "= f\"SELECT COUNT(*) FROM {self.table_name} \" \\ \"WHERE record='success';\" with psycopg2.connect(self.connection_info) as conn:", "entry into the table with a given identifier and erroneous record :param identifier:", "cur.execute(query) failures = {} for (name, record) in cur: failures.setdefault(record, []) failures[record].append(name) return", "conn.cursor() as cur: cur.execute(query) if cur.rowcount > 0: return cur.fetchone()[0] return None def", "\"\"\" query = f\"SELECT id, record FROM {self.table_name} \" \\ \"WHERE record<>'success';\" with", "\" \\ \"WHERE record<>'success';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query)", "\"\"\" query = f\"DELETE FROM {self.table_name} \" \\ f\"WHERE id='{identifier}';\" with psycopg2.connect(self.connection_info) as", "all job identifiers mapped to their respective records \"\"\" query = f\"SELECT *", "(str) Optional string name for the errors db table. \"\"\" self.connection_info = os.environ.get(\"CEDA_INTAKE_DB_SETTINGS\")", "db table. 
\"\"\" self.connection_info = os.environ.get(\"CEDA_INTAKE_DB_SETTINGS\") if not self.connection_info: raise KeyError('Please create environment", "= os.environ.get(\"CEDA_INTAKE_DB_SETTINGS\") if not self.connection_info: raise KeyError('Please create environment variable CEDA_INTAKE_DB_SETTINGS' 'in for", "their respective records \"\"\" query = f\"SELECT * FROM {self.table_name}\" with psycopg2.connect(self.connection_info) as", "of the job :param error_type: (str) Record of the job \"\"\" if self.get_record(identifier):", "\"\"\" Returns true / false on whether the record with this identifier is", "for format of \"dbname=<db_name> user=<user_name>' 'host=<host_name> password=<password>\"') self._test_connection() self.table_name = table_name self.error_table_name =", "INTO {self.table_name} \" \\ f\"VALUES ('{identifier}', '{error_type}');\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor()", "format of \"dbname=<db_name> user=<user_name>' 'host=<host_name> password=<password>\"') self._test_connection() self.table_name = table_name self.error_table_name = error_table_name", "NOT EXISTS {self.table_name} (\" f\" id varchar({self._max_id_length}) PRIMARY KEY, \" f\" record varchar({self._max_record_length})", "database :param identifier: (str) Identifier of the job \"\"\" query = f\"DELETE FROM", "query = f\"INSERT INTO {self.table_name} \" \\ f\"VALUES ('{identifier}', 'success');\" with psycopg2.connect(self.connection_info) as", "return cur.fetchone()[0] def count_failures(self): \"\"\" :return: (int) Number of failed records in the", "with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"DELETE FROM {self.table_name};\") conn.commit() def", "\\ f\"WHERE id='{identifier}';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) if", "as conn: with conn.cursor() as cur: cur.execute(f\"DROP TABLE {self.table_name};\") conn.commit() cur.execute(f\"DROP 
TABLE {self.error_table_name};\")", "they don't already exist. \"\"\" # Create main table with psycopg2.connect(self.connection_info) as conn:", "as cur: cur.execute(query) if cur.rowcount > 0: return cur.fetchone()[0] return None def get_all_records(self):", "EXISTS {self.error_table_name} (\" f\" id varchar({self._max_id_length}) PRIMARY KEY, \" f\" record varchar({self._max_record_length}) NOT", "cur: cur.execute(query) record = cur.fetchone() if record is not None: return record[0] ==", "from the table \"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"DELETE", "Identifier of the job \"\"\" if self.get_record(identifier): self.delete_record(identifier) query = f\"INSERT INTO {self.table_name}", "table \"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"SELECT COUNT(*) FROM", "lists of job identifiers which record in them \"\"\" query = f\"SELECT id,", "record<>'success';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) return cur.fetchone()[0] def", "import psycopg2 import os class DBHandler: _max_id_length = 255 _max_record_length = 255 def", "database tables \"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"DROP TABLE", "cur.execute(query) return cur.fetchone()[0] def count_failures(self): \"\"\" :return: (int) Number of failed records in", "CEDA_INTAKE_DB_SETTINGS' 'in for format of \"dbname=<db_name> user=<user_name>' 'host=<host_name> password=<password>\"') self._test_connection() self.table_name = table_name", "with conn.cursor() as cur: cur.execute(query) record = cur.fetchone() if record is not None:", "NotImplementedError def insert_success(self, identifier): \"\"\" Inserts an entry into the table with a", "delete_all_records(self): \"\"\" Deletes all entries from the table \"\"\" with psycopg2.connect(self.connection_info) as conn:", 
"ValueError('CEDA_INTAKE_DB_SETTINGS string is incorrect. Should be' 'in for format of \"dbname=<db_name> user=<user_name>' 'host=<host_name>", "for (name, record) in cur: record_dict[name] = record return record_dict def get_successful_runs(self): \"\"\"", "as cur: cur.execute(query) failures = {} for (name, record) in cur: failures.setdefault(record, [])", "2, 3), (4, 5, 6), (7, 8, 9)]) \"\"\" raise NotImplementedError def insert_success(self,", "format of \"dbname=<db_name> user=<user_name>' 'host=<host_name> password=<password>\"') conn.close() def _create_tables(self): \"\"\" Creates tables if", "Optional string name of the main db table. :param error_table_name: (str) Optional string", "all successful runs \"\"\" query = f\"SELECT id FROM {self.table_name} \" \\ \"WHERE", "not None: return record[0] == 'success' return False def count_records(self): \"\"\" :return: (int)", "conn.cursor() as cur: cur.execute(query) record = cur.fetchone() if record is not None: return", "table \"\"\" query = f\"SELECT COUNT(*) FROM {self.table_name} \" \\ \"WHERE record<>'success';\" with", "psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) return cur.fetchone()[0] def batch_insert(self, records):", "record varchar({self._max_record_length}) NOT NULL\" f\");\") conn.commit() def _delete_tables(self): \"\"\" Drops the database tables", "record FROM {self.table_name} \" \\ \"WHERE record<>'success';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor()", "'host=<host_name> password=<password>\"') conn.close() def _create_tables(self): \"\"\" Creates tables if they don't already exist.", "{self.table_name} \" \\ \"WHERE record<>'success';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur:", "with conn.cursor() as cur: cur.execute(f\"DELETE FROM {self.table_name};\") conn.commit() def ran_successfully(self, identifier): \"\"\" Returns", "of the job with the identifier parsed and returns 
it :param identifier: (str)", "get_failed_runs(self): \"\"\" :return: (dict) Dictionary of error types mapped to lists of job", "as cur: cur.execute(f\"SELECT COUNT(*) FROM {self.table_name};\") return cur.fetchone()[0] def count_successes(self): \"\"\" :return: (int)", "= f\"INSERT INTO {self.table_name} \" \\ f\"VALUES ('{identifier}', 'success');\" with psycopg2.connect(self.connection_info) as conn:", "cur.execute(query) record_dict = {} for (name, record) in cur: record_dict[name] = record return", "self.connection_info: raise KeyError('Please create environment variable CEDA_INTAKE_DB_SETTINGS' 'in for format of \"dbname=<db_name> user=<user_name>'", "with conn.cursor() as cur: cur.execute(query) failures = {} for (name, record) in cur:", "\\ f\"WHERE id='{identifier}';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) record", "conn: with conn.cursor() as cur: cur.execute(query) return [name[0] for name in cur] def", "self.table_name = table_name self.error_table_name = error_table_name self._create_tables() def _test_connection(self): try: conn = psycopg2.connect(self.connection_info)", "into the table with a given identifier and erroneous record :param identifier: (str)", "NOT NULL\" f\");\") conn.commit() cur.execute(f\"CREATE TABLE IF NOT EXISTS {self.error_table_name} (\" f\" id", "psycopg2 import os class DBHandler: _max_id_length = 255 _max_record_length = 255 def __init__(self,", "TABLE IF NOT EXISTS {self.error_table_name} (\" f\" id varchar({self._max_id_length}) PRIMARY KEY, \" f\"", "/ false on whether the record with this identifier is successful :param identifier:", "return record_dict def get_successful_runs(self): \"\"\" :return: (str list) Returns a list of the", "the main db table. 
:param error_table_name: (str) Optional string name for the errors", "NOT EXISTS {self.error_table_name} (\" f\" id varchar({self._max_id_length}) PRIMARY KEY, \" f\" record varchar({self._max_record_length})", "identifier and the record 'success' :param identifier: (str) Identifier of the job \"\"\"", "execute_values(cur, ... \"INSERT INTO test (id, v1, v2) VALUES %s\", ... [(1, 2,", "as cur: cur.execute(f\"DROP TABLE {self.table_name};\") conn.commit() cur.execute(f\"DROP TABLE {self.error_table_name};\") conn.commit() def get_record(self, identifier):", "f\"SELECT record FROM {self.table_name} \" \\ f\"WHERE id='{identifier}';\" with psycopg2.connect(self.connection_info) as conn: with", "failures = {} for (name, record) in cur: failures.setdefault(record, []) failures[record].append(name) return failures", "with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"DROP TABLE {self.table_name};\") conn.commit() cur.execute(f\"DROP", "0: return cur.fetchone()[0] return None def get_all_records(self): \"\"\" :return: (dict) Dictionary of all", "mapped to lists of job identifiers which record in them \"\"\" query =", "(4, 5, 6), (7, 8, 9)]) \"\"\" raise NotImplementedError def insert_success(self, identifier): \"\"\"", "count_records(self): \"\"\" :return: (int) Number of records in the table \"\"\" with psycopg2.connect(self.connection_info)", "cur.execute(f\"SELECT COUNT(*) FROM {self.table_name};\") return cur.fetchone()[0] def count_successes(self): \"\"\" :return: (int) Number of", "insert_success(self, identifier): \"\"\" Inserts an entry into the table with a given identifier", "not self.connection_info: raise KeyError('Please create environment variable CEDA_INTAKE_DB_SETTINGS' 'in for format of \"dbname=<db_name>", "mapped to their respective records \"\"\" query = f\"SELECT * FROM {self.table_name}\" with", "COUNT(*) FROM {self.table_name} \" \\ \"WHERE record<>'success';\" with psycopg2.connect(self.connection_info) 
as conn: with conn.cursor()", "records \"\"\" query = f\"SELECT * FROM {self.table_name}\" with psycopg2.connect(self.connection_info) as conn: with", "name of the main db table. :param error_table_name: (str) Optional string name for", "of error types mapped to lists of job identifiers which record in them", "for name in cur] def get_failed_runs(self): \"\"\" :return: (dict) Dictionary of error types", "* FROM {self.table_name}\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) record_dict", "the identifier parsed and returns it :param identifier: (str) Identifier of the job", "this identifier is successful :param identifier: (str) Identifier of the job record :return:", "None: return record[0] == 'success' return False def count_records(self): \"\"\" :return: (int) Number", "VALUES %s\", ... [(1, 2, 3), (4, 5, 6), (7, 8, 9)]) \"\"\"", "conn.commit() def get_record(self, identifier): \"\"\" Selects the record of the job with the", "with conn.cursor() as cur: cur.execute(query) if cur.rowcount > 0: return cur.fetchone()[0] return None", "raise KeyError('Please create environment variable CEDA_INTAKE_DB_SETTINGS' 'in for format of \"dbname=<db_name> user=<user_name>' 'host=<host_name>", "Selects the record of the job with the identifier parsed and returns it", "if self.get_record(identifier): self.delete_record(identifier) query = f\"INSERT INTO {self.table_name} \" \\ f\"VALUES ('{identifier}', 'success');\"", "cur: cur.execute(f\"DROP TABLE {self.table_name};\") conn.commit() cur.execute(f\"DROP TABLE {self.error_table_name};\") conn.commit() def get_record(self, identifier): \"\"\"", "query = f\"INSERT INTO {self.table_name} \" \\ f\"VALUES ('{identifier}', '{error_type}');\" with psycopg2.connect(self.connection_info) as", "ran_successfully(self, identifier): \"\"\" Returns true / false on whether the record with this", "__init__(self, table_name=\"intake_records\", error_table_name=\"scan_errors\"): \"\"\" 
:param table_name: (str) Optional string name of the main", "= error_table_name self._create_tables() def _test_connection(self): try: conn = psycopg2.connect(self.connection_info) except psycopg2.Error as err:", "\\ \"WHERE record<>'success';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) return", "table with a given identifier and erroneous record :param identifier: (str) Identifier of", "return None def get_all_records(self): \"\"\" :return: (dict) Dictionary of all job identifiers mapped", "f\"WHERE id='{identifier}';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) conn.commit() def", "\"\"\" query = f\"SELECT COUNT(*) FROM {self.table_name} \" \\ \"WHERE record='success';\" with psycopg2.connect(self.connection_info)", "record = cur.fetchone() if record is not None: return record[0] == 'success' return", "error_type: (str) Record of the job \"\"\" if self.get_record(identifier): self.delete_record(identifier) error_type = error_type[:self._max_record_length]", "of the job record :return: (str) Record of job \"\"\" query = f\"SELECT", "records in the table \"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur:", "3), (4, 5, 6), (7, 8, 9)]) \"\"\" raise NotImplementedError def insert_success(self, identifier):", "def count_successes(self): \"\"\" :return: (int) Number of successfull records in the table \"\"\"", "id='{identifier}';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) if cur.rowcount >", "psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) conn.commit() def delete_all_records(self): \"\"\" Deletes", "records. >>> execute_values(cur, ... 
\"INSERT INTO test (id, v1, v2) VALUES %s\", ...", "Create main table with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"CREATE TABLE", "\"dbname=<db_name> user=<user_name>' 'host=<host_name> password=<password>\"') conn.close() def _create_tables(self): \"\"\" Creates tables if they don't", "{self.table_name} \" \\ f\"WHERE id='{identifier}';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur:", "in the table \"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"SELECT", "them \"\"\" query = f\"SELECT id, record FROM {self.table_name} \" \\ \"WHERE record<>'success';\"", "def ran_successfully(self, identifier): \"\"\" Returns true / false on whether the record with", "return [name[0] for name in cur] def get_failed_runs(self): \"\"\" :return: (dict) Dictionary of", "f\" id varchar({self._max_id_length}) PRIMARY KEY, \" f\" record varchar({self._max_record_length}) NOT NULL\" f\");\") conn.commit()", "record return record_dict def get_successful_runs(self): \"\"\" :return: (str list) Returns a list of", "from the database :param identifier: (str) Identifier of the job \"\"\" query =", "Inserts an entry into the table with a given identifier and the record", "returns it :param identifier: (str) Identifier of the job record :return: (str) Record", "f\");\") conn.commit() cur.execute(f\"CREATE TABLE IF NOT EXISTS {self.error_table_name} (\" f\" id varchar({self._max_id_length}) PRIMARY", "Returns a list of the identifiers of all successful runs \"\"\" query =", "error_table_name=\"scan_errors\"): \"\"\" :param table_name: (str) Optional string name of the main db table.", "\"\"\" Drops the database tables \"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as", "{self.table_name}\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) record_dict = {}", "def get_record(self, 
identifier): \"\"\" Selects the record of the job with the identifier", "conn: with conn.cursor() as cur: cur.execute(query) record = cur.fetchone() if record is not", "count_failures(self): \"\"\" :return: (int) Number of failed records in the table \"\"\" query", "id='{identifier}';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) conn.commit() def delete_all_records(self):", "# Create main table with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"CREATE", "\\ f\"VALUES ('{identifier}', 'success');\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query)", "psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) conn.commit() def insert_failure(self, identifier, error_type='failure'):", "err: print(err) raise ValueError('CEDA_INTAKE_DB_SETTINGS string is incorrect. Should be' 'in for format of", "of job identifiers which record in them \"\"\" query = f\"SELECT id, record", "\"\"\" Inserts an entry into the table with a given identifier and the", "and the record 'success' :param identifier: (str) Identifier of the job \"\"\" if", "as cur: cur.execute(query) record_dict = {} for (name, record) in cur: record_dict[name] =", "\"\"\" query = f\"SELECT record FROM {self.table_name} \" \\ f\"WHERE id='{identifier}';\" with psycopg2.connect(self.connection_info)", "of the identifiers of all successful runs \"\"\" query = f\"SELECT id FROM", "the table \"\"\" query = f\"SELECT COUNT(*) FROM {self.table_name} \" \\ \"WHERE record='success';\"", "as cur: cur.execute(query) return cur.fetchone()[0] def count_failures(self): \"\"\" :return: (int) Number of failed", "identifier: (str) Identifier of the job \"\"\" query = f\"DELETE FROM {self.table_name} \"", "id varchar({self._max_id_length}) PRIMARY KEY, \" f\" record varchar({self._max_record_length}) NOT NULL\" f\");\") conn.commit() 
cur.execute(f\"CREATE", "identifier): \"\"\" Deletes entry specified by the given identifier from the database :param", "psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) return cur.fetchone()[0] def count_failures(self): \"\"\"", "identifier: (str) Identifier of the job record :return: (str) Record of job \"\"\"", ":return: (dict) Dictionary of error types mapped to lists of job identifiers which", "cur.execute(query) return cur.fetchone()[0] def batch_insert(self, records): \"\"\"Batch insert records. >>> execute_values(cur, ... \"INSERT", "Should be' 'in for format of \"dbname=<db_name> user=<user_name>' 'host=<host_name> password=<password>\"') conn.close() def _create_tables(self):", "def get_successful_runs(self): \"\"\" :return: (str list) Returns a list of the identifiers of", "record in them \"\"\" query = f\"SELECT id, record FROM {self.table_name} \" \\", "cur.execute(f\"CREATE TABLE IF NOT EXISTS {self.error_table_name} (\" f\" id varchar({self._max_id_length}) PRIMARY KEY, \"", "query = f\"SELECT COUNT(*) FROM {self.table_name} \" \\ \"WHERE record<>'success';\" with psycopg2.connect(self.connection_info) as", "with the identifier parsed and returns it :param identifier: (str) Identifier of the", "255 _max_record_length = 255 def __init__(self, table_name=\"intake_records\", error_table_name=\"scan_errors\"): \"\"\" :param table_name: (str) Optional", "conn.commit() cur.execute(f\"CREATE TABLE IF NOT EXISTS {self.error_table_name} (\" f\" id varchar({self._max_id_length}) PRIMARY KEY,", ":param identifier: (str) Identifier of the job record :return: (bool) Boolean on if", "on whether the record with this identifier is successful :param identifier: (str) Identifier", "conn.commit() cur.execute(f\"DROP TABLE {self.error_table_name};\") conn.commit() def get_record(self, identifier): \"\"\" Selects the record of", "with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) 
failures = {} for", "all entries from the table \"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as", ":param identifier: (str) Identifier of the job :param error_type: (str) Record of the", "(str) Identifier of the job record :return: (str) Record of job \"\"\" query", "of all job identifiers mapped to their respective records \"\"\" query = f\"SELECT", "Identifier of the job record :return: (str) Record of job \"\"\" query =", "(bool) Boolean on if job ran successfully \"\"\" query = f\"SELECT record FROM", "cur: cur.execute(f\"DELETE FROM {self.table_name};\") conn.commit() def ran_successfully(self, identifier): \"\"\" Returns true / false", "'in for format of \"dbname=<db_name> user=<user_name>' 'host=<host_name> password=<password>\"') self._test_connection() self.table_name = table_name self.error_table_name", "conn: with conn.cursor() as cur: cur.execute(query) record_dict = {} for (name, record) in", "cur: cur.execute(query) if cur.rowcount > 0: return cur.fetchone()[0] return None def get_all_records(self): \"\"\"", "\"\"\" Deletes entry specified by the given identifier from the database :param identifier:", "is successful :param identifier: (str) Identifier of the job record :return: (bool) Boolean", "INTO test (id, v1, v2) VALUES %s\", ... 
[(1, 2, 3), (4, 5,", "as conn: with conn.cursor() as cur: cur.execute(query) conn.commit() def delete_all_records(self): \"\"\" Deletes all", "of the job \"\"\" query = f\"DELETE FROM {self.table_name} \" \\ f\"WHERE id='{identifier}';\"", "\" f\" record varchar({self._max_record_length}) NOT NULL\" f\");\") conn.commit() def _delete_tables(self): \"\"\" Drops the", "error_table_name self._create_tables() def _test_connection(self): try: conn = psycopg2.connect(self.connection_info) except psycopg2.Error as err: print(err)", "False def count_records(self): \"\"\" :return: (int) Number of records in the table \"\"\"", "{self.table_name};\") conn.commit() cur.execute(f\"DROP TABLE {self.error_table_name};\") conn.commit() def get_record(self, identifier): \"\"\" Selects the record", "main table with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"CREATE TABLE IF", "(int) Number of failed records in the table \"\"\" query = f\"SELECT COUNT(*)", "Identifier of the job record :return: (bool) Boolean on if job ran successfully", "cur.execute(query) return [name[0] for name in cur] def get_failed_runs(self): \"\"\" :return: (dict) Dictionary", "identifier: (str) Identifier of the job record :return: (bool) Boolean on if job", "Boolean on if job ran successfully \"\"\" query = f\"SELECT record FROM {self.table_name}", "record) in cur: failures.setdefault(record, []) failures[record].append(name) return failures def delete_record(self, identifier): \"\"\" Deletes", "psycopg2.Error as err: print(err) raise ValueError('CEDA_INTAKE_DB_SETTINGS string is incorrect. Should be' 'in for", "table. 
\"\"\" self.connection_info = os.environ.get(\"CEDA_INTAKE_DB_SETTINGS\") if not self.connection_info: raise KeyError('Please create environment variable", "cur.rowcount > 0: return cur.fetchone()[0] return None def get_all_records(self): \"\"\" :return: (dict) Dictionary", "conn: with conn.cursor() as cur: cur.execute(f\"CREATE TABLE IF NOT EXISTS {self.table_name} (\" f\"", "cur.execute(f\"DROP TABLE {self.error_table_name};\") conn.commit() def get_record(self, identifier): \"\"\" Selects the record of the", "{self.table_name} \" \\ f\"VALUES ('{identifier}', '{error_type}');\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as", "a given identifier and erroneous record :param identifier: (str) Identifier of the job", "\"\"\" Inserts an entry into the table with a given identifier and erroneous", "COUNT(*) FROM {self.table_name};\") return cur.fetchone()[0] def count_successes(self): \"\"\" :return: (int) Number of successfull", ":param identifier: (str) Identifier of the job \"\"\" if self.get_record(identifier): self.delete_record(identifier) query =", "of job \"\"\" query = f\"SELECT record FROM {self.table_name} \" \\ f\"WHERE id='{identifier}';\"", "user=<user_name>' 'host=<host_name> password=<password>\"') self._test_connection() self.table_name = table_name self.error_table_name = error_table_name self._create_tables() def _test_connection(self):", "with conn.cursor() as cur: cur.execute(query) return cur.fetchone()[0] def count_failures(self): \"\"\" :return: (int) Number", "respective records \"\"\" query = f\"SELECT * FROM {self.table_name}\" with psycopg2.connect(self.connection_info) as conn:", "raise ValueError('CEDA_INTAKE_DB_SETTINGS string is incorrect. 
Should be' 'in for format of \"dbname=<db_name> user=<user_name>'", "cur.execute(query) conn.commit() def delete_all_records(self): \"\"\" Deletes all entries from the table \"\"\" with", "\"\"\" query = f\"SELECT COUNT(*) FROM {self.table_name} \" \\ \"WHERE record<>'success';\" with psycopg2.connect(self.connection_info)", "{} for (name, record) in cur: record_dict[name] = record return record_dict def get_successful_runs(self):", "\" \\ f\"VALUES ('{identifier}', '{error_type}');\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur:", "don't already exist. \"\"\" # Create main table with psycopg2.connect(self.connection_info) as conn: with", "\"dbname=<db_name> user=<user_name>' 'host=<host_name> password=<password>\"') self._test_connection() self.table_name = table_name self.error_table_name = error_table_name self._create_tables() def", "the job \"\"\" if self.get_record(identifier): self.delete_record(identifier) error_type = error_type[:self._max_record_length] query = f\"INSERT INTO", "the job record :return: (bool) Boolean on if job ran successfully \"\"\" query", "id='{identifier}';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) record = cur.fetchone()", "table_name self.error_table_name = error_table_name self._create_tables() def _test_connection(self): try: conn = psycopg2.connect(self.connection_info) except psycopg2.Error", "\"\"\" # Create main table with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur:", "= f\"SELECT id FROM {self.table_name} \" \\ \"WHERE record='success';\" with psycopg2.connect(self.connection_info) as conn:", "conn.cursor() as cur: cur.execute(f\"DROP TABLE {self.table_name};\") conn.commit() cur.execute(f\"DROP TABLE {self.error_table_name};\") conn.commit() def get_record(self,", "(int) Number of records in the table \"\"\" with psycopg2.connect(self.connection_info) as conn: with", "cur] def get_failed_runs(self): 
\"\"\" :return: (dict) Dictionary of error types mapped to lists", "record with this identifier is successful :param identifier: (str) Identifier of the job", "in the table \"\"\" query = f\"SELECT COUNT(*) FROM {self.table_name} \" \\ \"WHERE", "batch_insert(self, records): \"\"\"Batch insert records. >>> execute_values(cur, ... \"INSERT INTO test (id, v1,", "record='success';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) return cur.fetchone()[0] def", "%s\", ... [(1, 2, 3), (4, 5, 6), (7, 8, 9)]) \"\"\" raise", "f\"DELETE FROM {self.table_name} \" \\ f\"WHERE id='{identifier}';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor()", "record_dict[name] = record return record_dict def get_successful_runs(self): \"\"\" :return: (str list) Returns a", "'in for format of \"dbname=<db_name> user=<user_name>' 'host=<host_name> password=<password>\"') conn.close() def _create_tables(self): \"\"\" Creates", "job record :return: (bool) Boolean on if job ran successfully \"\"\" query =", "self.get_record(identifier): self.delete_record(identifier) query = f\"INSERT INTO {self.table_name} \" \\ f\"VALUES ('{identifier}', 'success');\" with", "f\");\") conn.commit() def _delete_tables(self): \"\"\" Drops the database tables \"\"\" with psycopg2.connect(self.connection_info) as", "user=<user_name>' 'host=<host_name> password=<password>\"') conn.close() def _create_tables(self): \"\"\" Creates tables if they don't already", ":param error_table_name: (str) Optional string name for the errors db table. 
\"\"\" self.connection_info", "in cur: record_dict[name] = record return record_dict def get_successful_runs(self): \"\"\" :return: (str list)", "def delete_all_records(self): \"\"\" Deletes all entries from the table \"\"\" with psycopg2.connect(self.connection_info) as", "the table with a given identifier and erroneous record :param identifier: (str) Identifier", "Drops the database tables \"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur:", "5, 6), (7, 8, 9)]) \"\"\" raise NotImplementedError def insert_success(self, identifier): \"\"\" Inserts", "is not None: return record[0] == 'success' return False def count_records(self): \"\"\" :return:", "Record of the job \"\"\" if self.get_record(identifier): self.delete_record(identifier) error_type = error_type[:self._max_record_length] query =", "with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"CREATE TABLE IF NOT EXISTS", "delete_record(self, identifier): \"\"\" Deletes entry specified by the given identifier from the database", "return cur.fetchone()[0] def count_successes(self): \"\"\" :return: (int) Number of successfull records in the", "= table_name self.error_table_name = error_table_name self._create_tables() def _test_connection(self): try: conn = psycopg2.connect(self.connection_info) except", "conn.cursor() as cur: cur.execute(query) return [name[0] for name in cur] def get_failed_runs(self): \"\"\"", "record='success';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) return [name[0] for", "job identifiers which record in them \"\"\" query = f\"SELECT id, record FROM", "query = f\"SELECT COUNT(*) FROM {self.table_name} \" \\ \"WHERE record='success';\" with psycopg2.connect(self.connection_info) as", "9)]) \"\"\" raise NotImplementedError def insert_success(self, identifier): \"\"\" Inserts an entry into the", "main db table. 
:param error_table_name: (str) Optional string name for the errors db", "= {} for (name, record) in cur: failures.setdefault(record, []) failures[record].append(name) return failures def", "with a given identifier and erroneous record :param identifier: (str) Identifier of the", "(str) Record of the job \"\"\" if self.get_record(identifier): self.delete_record(identifier) error_type = error_type[:self._max_record_length] query", "psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"DROP TABLE {self.table_name};\") conn.commit() cur.execute(f\"DROP TABLE", ":return: (bool) Boolean on if job ran successfully \"\"\" query = f\"SELECT record", "f\"INSERT INTO {self.table_name} \" \\ f\"VALUES ('{identifier}', '{error_type}');\" with psycopg2.connect(self.connection_info) as conn: with", "\"INSERT INTO test (id, v1, v2) VALUES %s\", ... [(1, 2, 3), (4,", "\\ \"WHERE record='success';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) return", "\"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"DELETE FROM {self.table_name};\") conn.commit()", "conn: with conn.cursor() as cur: cur.execute(query) conn.commit() def insert_failure(self, identifier, error_type='failure'): \"\"\" Inserts", "insert records. >>> execute_values(cur, ... \"INSERT INTO test (id, v1, v2) VALUES %s\",", "f\"INSERT INTO {self.table_name} \" \\ f\"VALUES ('{identifier}', 'success');\" with psycopg2.connect(self.connection_info) as conn: with", "def _delete_tables(self): \"\"\" Drops the database tables \"\"\" with psycopg2.connect(self.connection_info) as conn: with", "Creates tables if they don't already exist. 
\"\"\" # Create main table with", "Dictionary of all job identifiers mapped to their respective records \"\"\" query =", "the job \"\"\" if self.get_record(identifier): self.delete_record(identifier) query = f\"INSERT INTO {self.table_name} \" \\", "record FROM {self.table_name} \" \\ f\"WHERE id='{identifier}';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor()", "cur.fetchone()[0] def count_failures(self): \"\"\" :return: (int) Number of failed records in the table", "conn.cursor() as cur: cur.execute(f\"CREATE TABLE IF NOT EXISTS {self.table_name} (\" f\" id varchar({self._max_id_length})", "PRIMARY KEY, \" f\" record varchar({self._max_record_length}) NOT NULL\" f\");\") conn.commit() cur.execute(f\"CREATE TABLE IF", "def insert_success(self, identifier): \"\"\" Inserts an entry into the table with a given", "given identifier and the record 'success' :param identifier: (str) Identifier of the job", "successful runs \"\"\" query = f\"SELECT id FROM {self.table_name} \" \\ \"WHERE record='success';\"", "= psycopg2.connect(self.connection_info) except psycopg2.Error as err: print(err) raise ValueError('CEDA_INTAKE_DB_SETTINGS string is incorrect. Should", "id FROM {self.table_name} \" \\ \"WHERE record='success';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor()", "as conn: with conn.cursor() as cur: cur.execute(query) return cur.fetchone()[0] def count_failures(self): \"\"\" :return:", "v2) VALUES %s\", ... 
[(1, 2, 3), (4, 5, 6), (7, 8, 9)])", "identifiers mapped to their respective records \"\"\" query = f\"SELECT * FROM {self.table_name}\"", "= 255 def __init__(self, table_name=\"intake_records\", error_table_name=\"scan_errors\"): \"\"\" :param table_name: (str) Optional string name", "as conn: with conn.cursor() as cur: cur.execute(query) failures = {} for (name, record)", "Deletes entry specified by the given identifier from the database :param identifier: (str)", "\"\"\" Selects the record of the job with the identifier parsed and returns", "list of the identifiers of all successful runs \"\"\" query = f\"SELECT id", "def __init__(self, table_name=\"intake_records\", error_table_name=\"scan_errors\"): \"\"\" :param table_name: (str) Optional string name of the", "print(err) raise ValueError('CEDA_INTAKE_DB_SETTINGS string is incorrect. Should be' 'in for format of \"dbname=<db_name>", "def count_records(self): \"\"\" :return: (int) Number of records in the table \"\"\" with", ":return: (int) Number of successfull records in the table \"\"\" query = f\"SELECT", "self.error_table_name = error_table_name self._create_tables() def _test_connection(self): try: conn = psycopg2.connect(self.connection_info) except psycopg2.Error as", "conn.cursor() as cur: cur.execute(f\"SELECT COUNT(*) FROM {self.table_name};\") return cur.fetchone()[0] def count_successes(self): \"\"\" :return:", "'success' :param identifier: (str) Identifier of the job \"\"\" if self.get_record(identifier): self.delete_record(identifier) query", "... 
[(1, 2, 3), (4, 5, 6), (7, 8, 9)]) \"\"\" raise NotImplementedError", "\" \\ \"WHERE record='success';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query)", "= error_type[:self._max_record_length] query = f\"INSERT INTO {self.table_name} \" \\ f\"VALUES ('{identifier}', '{error_type}');\" with", "KEY, \" f\" record varchar({self._max_record_length}) NOT NULL\" f\");\") conn.commit() def _delete_tables(self): \"\"\" Drops", "table with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"CREATE TABLE IF NOT", "identifier): \"\"\" Selects the record of the job with the identifier parsed and", "def insert_failure(self, identifier, error_type='failure'): \"\"\" Inserts an entry into the table with a", "the database :param identifier: (str) Identifier of the job \"\"\" query = f\"DELETE", "Record of job \"\"\" query = f\"SELECT record FROM {self.table_name} \" \\ f\"WHERE", ":return: (str) Record of job \"\"\" query = f\"SELECT record FROM {self.table_name} \"", "raise NotImplementedError def insert_success(self, identifier): \"\"\" Inserts an entry into the table with", "cur.execute(query) record = cur.fetchone() if record is not None: return record[0] == 'success'", "\"WHERE record='success';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) return cur.fetchone()[0]", "(str) Identifier of the job record :return: (bool) Boolean on if job ran", "table with a given identifier and the record 'success' :param identifier: (str) Identifier", "error_table_name: (str) Optional string name for the errors db table. \"\"\" self.connection_info =", "if they don't already exist. 
\"\"\" # Create main table with psycopg2.connect(self.connection_info) as", "Identifier of the job \"\"\" query = f\"DELETE FROM {self.table_name} \" \\ f\"WHERE", "error types mapped to lists of job identifiers which record in them \"\"\"", "def _test_connection(self): try: conn = psycopg2.connect(self.connection_info) except psycopg2.Error as err: print(err) raise ValueError('CEDA_INTAKE_DB_SETTINGS", "records): \"\"\"Batch insert records. >>> execute_values(cur, ... \"INSERT INTO test (id, v1, v2)", "create environment variable CEDA_INTAKE_DB_SETTINGS' 'in for format of \"dbname=<db_name> user=<user_name>' 'host=<host_name> password=<password>\"') self._test_connection()", "the record of the job with the identifier parsed and returns it :param", "of all successful runs \"\"\" query = f\"SELECT id FROM {self.table_name} \" \\", "with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) if cur.rowcount > 0:", "for (name, record) in cur: failures.setdefault(record, []) failures[record].append(name) return failures def delete_record(self, identifier):", "failures def delete_record(self, identifier): \"\"\" Deletes entry specified by the given identifier from", "_delete_tables(self): \"\"\" Drops the database tables \"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor()", "the job :param error_type: (str) Record of the job \"\"\" if self.get_record(identifier): self.delete_record(identifier)", "record of the job with the identifier parsed and returns it :param identifier:", "return cur.fetchone()[0] return None def get_all_records(self): \"\"\" :return: (dict) Dictionary of all job", "(str) Identifier of the job \"\"\" if self.get_record(identifier): self.delete_record(identifier) query = f\"INSERT INTO", "self.get_record(identifier): self.delete_record(identifier) error_type = error_type[:self._max_record_length] query = f\"INSERT INTO {self.table_name} \" \\ f\"VALUES", "the job \"\"\" query = 
f\"DELETE FROM {self.table_name} \" \\ f\"WHERE id='{identifier}';\" with", "conn: with conn.cursor() as cur: cur.execute(f\"SELECT COUNT(*) FROM {self.table_name};\") return cur.fetchone()[0] def count_successes(self):", "255 def __init__(self, table_name=\"intake_records\", error_table_name=\"scan_errors\"): \"\"\" :param table_name: (str) Optional string name of", "\"\"\" self.connection_info = os.environ.get(\"CEDA_INTAKE_DB_SETTINGS\") if not self.connection_info: raise KeyError('Please create environment variable CEDA_INTAKE_DB_SETTINGS'", "table \"\"\" query = f\"SELECT COUNT(*) FROM {self.table_name} \" \\ \"WHERE record='success';\" with", "class DBHandler: _max_id_length = 255 _max_record_length = 255 def __init__(self, table_name=\"intake_records\", error_table_name=\"scan_errors\"): \"\"\"", "query = f\"DELETE FROM {self.table_name} \" \\ f\"WHERE id='{identifier}';\" with psycopg2.connect(self.connection_info) as conn:", "cur: cur.execute(query) return cur.fetchone()[0] def count_failures(self): \"\"\" :return: (int) Number of failed records", "with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) return cur.fetchone()[0] def batch_insert(self,", "variable CEDA_INTAKE_DB_SETTINGS' 'in for format of \"dbname=<db_name> user=<user_name>' 'host=<host_name> password=<password>\"') self._test_connection() self.table_name =", "record varchar({self._max_record_length}) NOT NULL\" f\");\") conn.commit() cur.execute(f\"CREATE TABLE IF NOT EXISTS {self.error_table_name} (\"", "cur.execute(f\"DROP TABLE {self.table_name};\") conn.commit() cur.execute(f\"DROP TABLE {self.error_table_name};\") conn.commit() def get_record(self, identifier): \"\"\" Selects", "KeyError('Please create environment variable CEDA_INTAKE_DB_SETTINGS' 'in for format of \"dbname=<db_name> user=<user_name>' 'host=<host_name> password=<password>\"')", "record :return: (bool) Boolean on if job ran successfully \"\"\" query = f\"SELECT", "v1, v2) 
VALUES %s\", ... [(1, 2, 3), (4, 5, 6), (7, 8,", "FROM {self.table_name};\") return cur.fetchone()[0] def count_successes(self): \"\"\" :return: (int) Number of successfull records", "an entry into the table with a given identifier and erroneous record :param", "\\ f\"VALUES ('{identifier}', '{error_type}');\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query)", "[name[0] for name in cur] def get_failed_runs(self): \"\"\" :return: (dict) Dictionary of error", "if job ran successfully \"\"\" query = f\"SELECT record FROM {self.table_name} \" \\", "environment variable CEDA_INTAKE_DB_SETTINGS' 'in for format of \"dbname=<db_name> user=<user_name>' 'host=<host_name> password=<password>\"') self._test_connection() self.table_name", "(\" f\" id varchar({self._max_id_length}) PRIMARY KEY, \" f\" record varchar({self._max_record_length}) NOT NULL\" f\");\")", "= f\"SELECT * FROM {self.table_name}\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur:", "self.delete_record(identifier) query = f\"INSERT INTO {self.table_name} \" \\ f\"VALUES ('{identifier}', 'success');\" with psycopg2.connect(self.connection_info)", "identifiers which record in them \"\"\" query = f\"SELECT id, record FROM {self.table_name}", "INTO {self.table_name} \" \\ f\"VALUES ('{identifier}', 'success');\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor()", "with conn.cursor() as cur: cur.execute(query) record_dict = {} for (name, record) in cur:", "conn: with conn.cursor() as cur: cur.execute(query) failures = {} for (name, record) in", "f\"SELECT COUNT(*) FROM {self.table_name} \" \\ \"WHERE record='success';\" with psycopg2.connect(self.connection_info) as conn: with", "conn: with conn.cursor() as cur: cur.execute(query) conn.commit() def delete_all_records(self): \"\"\" Deletes all entries", "string name of the main db table. 
:param error_table_name: (str) Optional string name", "tables \"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"DROP TABLE {self.table_name};\")", "job ran successfully \"\"\" query = f\"SELECT record FROM {self.table_name} \" \\ f\"WHERE", "_max_record_length = 255 def __init__(self, table_name=\"intake_records\", error_table_name=\"scan_errors\"): \"\"\" :param table_name: (str) Optional string", "of the main db table. :param error_table_name: (str) Optional string name for the", "the database tables \"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"DROP", "job \"\"\" query = f\"DELETE FROM {self.table_name} \" \\ f\"WHERE id='{identifier}';\" with psycopg2.connect(self.connection_info)", "= f\"INSERT INTO {self.table_name} \" \\ f\"VALUES ('{identifier}', '{error_type}');\" with psycopg2.connect(self.connection_info) as conn:", "[]) failures[record].append(name) return failures def delete_record(self, identifier): \"\"\" Deletes entry specified by the", "\"WHERE record='success';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) return [name[0]", "(str list) Returns a list of the identifiers of all successful runs \"\"\"", "(str) Identifier of the job :param error_type: (str) Record of the job \"\"\"", "of records in the table \"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as", "FROM {self.table_name} \" \\ \"WHERE record<>'success';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as", "with conn.cursor() as cur: cur.execute(query) conn.commit() def delete_all_records(self): \"\"\" Deletes all entries from", "self.delete_record(identifier) error_type = error_type[:self._max_record_length] query = f\"INSERT INTO {self.table_name} \" \\ f\"VALUES ('{identifier}',", "TABLE IF NOT EXISTS {self.table_name} (\" f\" id varchar({self._max_id_length}) PRIMARY 
KEY, \" f\"", "FROM {self.table_name}\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) record_dict =", "record[0] == 'success' return False def count_records(self): \"\"\" :return: (int) Number of records", "\"WHERE record<>'success';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) failures =", "cur.execute(f\"CREATE TABLE IF NOT EXISTS {self.table_name} (\" f\" id varchar({self._max_id_length}) PRIMARY KEY, \"", "records in the table \"\"\" query = f\"SELECT COUNT(*) FROM {self.table_name} \" \\", "with this identifier is successful :param identifier: (str) Identifier of the job record", "return record[0] == 'success' return False def count_records(self): \"\"\" :return: (int) Number of", "FROM {self.table_name} \" \\ \"WHERE record='success';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as", "if self.get_record(identifier): self.delete_record(identifier) error_type = error_type[:self._max_record_length] query = f\"INSERT INTO {self.table_name} \" \\", "cur: cur.execute(query) conn.commit() def delete_all_records(self): \"\"\" Deletes all entries from the table \"\"\"", "the table \"\"\" query = f\"SELECT COUNT(*) FROM {self.table_name} \" \\ \"WHERE record<>'success';\"", "f\"WHERE id='{identifier}';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) if cur.rowcount", "tables if they don't already exist. \"\"\" # Create main table with psycopg2.connect(self.connection_info)", "\"\"\" :param table_name: (str) Optional string name of the main db table. 
:param", "(str) Identifier of the job \"\"\" query = f\"DELETE FROM {self.table_name} \" \\", "as conn: with conn.cursor() as cur: cur.execute(query) conn.commit() def insert_failure(self, identifier, error_type='failure'): \"\"\"", "self._create_tables() def _test_connection(self): try: conn = psycopg2.connect(self.connection_info) except psycopg2.Error as err: print(err) raise", "{self.table_name};\") conn.commit() def ran_successfully(self, identifier): \"\"\" Returns true / false on whether the", "8, 9)]) \"\"\" raise NotImplementedError def insert_success(self, identifier): \"\"\" Inserts an entry into", "... \"INSERT INTO test (id, v1, v2) VALUES %s\", ... [(1, 2, 3),", "identifier from the database :param identifier: (str) Identifier of the job \"\"\" query", "true / false on whether the record with this identifier is successful :param", "import os class DBHandler: _max_id_length = 255 _max_record_length = 255 def __init__(self, table_name=\"intake_records\",", "== 'success' return False def count_records(self): \"\"\" :return: (int) Number of records in", "and returns it :param identifier: (str) Identifier of the job record :return: (str)", "get_all_records(self): \"\"\" :return: (dict) Dictionary of all job identifiers mapped to their respective", "identifier): \"\"\" Inserts an entry into the table with a given identifier and", "cur: cur.execute(query) failures = {} for (name, record) in cur: failures.setdefault(record, []) failures[record].append(name)", "with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) record = cur.fetchone() if", "record is not None: return record[0] == 'success' return False def count_records(self): \"\"\"", "test (id, v1, v2) VALUES %s\", ... 
[(1, 2, 3), (4, 5, 6),", "with conn.cursor() as cur: cur.execute(query) return [name[0] for name in cur] def get_failed_runs(self):", "identifier: (str) Identifier of the job \"\"\" if self.get_record(identifier): self.delete_record(identifier) query = f\"INSERT", "f\"VALUES ('{identifier}', 'success');\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) conn.commit()", "the table with a given identifier and the record 'success' :param identifier: (str)", "\"\"\" query = f\"SELECT id FROM {self.table_name} \" \\ \"WHERE record='success';\" with psycopg2.connect(self.connection_info)", "in cur] def get_failed_runs(self): \"\"\" :return: (dict) Dictionary of error types mapped to", "\"\"\" :return: (str list) Returns a list of the identifiers of all successful", "Identifier of the job :param error_type: (str) Record of the job \"\"\" if", "(str) Record of job \"\"\" query = f\"SELECT record FROM {self.table_name} \" \\", "job record :return: (str) Record of job \"\"\" query = f\"SELECT record FROM", "conn.close() def _create_tables(self): \"\"\" Creates tables if they don't already exist. \"\"\" #", "record 'success' :param identifier: (str) Identifier of the job \"\"\" if self.get_record(identifier): self.delete_record(identifier)", "error_type[:self._max_record_length] query = f\"INSERT INTO {self.table_name} \" \\ f\"VALUES ('{identifier}', '{error_type}');\" with psycopg2.connect(self.connection_info)", "name for the errors db table. 
\"\"\" self.connection_info = os.environ.get(\"CEDA_INTAKE_DB_SETTINGS\") if not self.connection_info:", "(name, record) in cur: failures.setdefault(record, []) failures[record].append(name) return failures def delete_record(self, identifier): \"\"\"", "COUNT(*) FROM {self.table_name} \" \\ \"WHERE record='success';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor()", "false on whether the record with this identifier is successful :param identifier: (str)", "an entry into the table with a given identifier and the record 'success'", "with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) conn.commit() def delete_all_records(self): \"\"\"", "job with the identifier parsed and returns it :param identifier: (str) Identifier of", "as cur: cur.execute(query) return cur.fetchone()[0] def batch_insert(self, records): \"\"\"Batch insert records. >>> execute_values(cur,", "as cur: cur.execute(f\"DELETE FROM {self.table_name};\") conn.commit() def ran_successfully(self, identifier): \"\"\" Returns true /", ":return: (str list) Returns a list of the identifiers of all successful runs", "failed records in the table \"\"\" query = f\"SELECT COUNT(*) FROM {self.table_name} \"", "def batch_insert(self, records): \"\"\"Batch insert records. >>> execute_values(cur, ... \"INSERT INTO test (id,", "db table. 
:param error_table_name: (str) Optional string name for the errors db table.", "in cur: failures.setdefault(record, []) failures[record].append(name) return failures def delete_record(self, identifier): \"\"\" Deletes entry", "conn.cursor() as cur: cur.execute(query) return cur.fetchone()[0] def count_failures(self): \"\"\" :return: (int) Number of", "cur.execute(query) conn.commit() def insert_failure(self, identifier, error_type='failure'): \"\"\" Inserts an entry into the table", "\"\"\" raise NotImplementedError def insert_success(self, identifier): \"\"\" Inserts an entry into the table", "be' 'in for format of \"dbname=<db_name> user=<user_name>' 'host=<host_name> password=<password>\"') conn.close() def _create_tables(self): \"\"\"", "conn = psycopg2.connect(self.connection_info) except psycopg2.Error as err: print(err) raise ValueError('CEDA_INTAKE_DB_SETTINGS string is incorrect.", "conn.cursor() as cur: cur.execute(query) failures = {} for (name, record) in cur: failures.setdefault(record,", "name in cur] def get_failed_runs(self): \"\"\" :return: (dict) Dictionary of error types mapped", "{self.error_table_name} (\" f\" id varchar({self._max_id_length}) PRIMARY KEY, \" f\" record varchar({self._max_record_length}) NOT NULL\"", "as cur: cur.execute(query) conn.commit() def delete_all_records(self): \"\"\" Deletes all entries from the table", "cur: cur.execute(query) record_dict = {} for (name, record) in cur: record_dict[name] = record", "\"\"\"Batch insert records. >>> execute_values(cur, ... 
\"INSERT INTO test (id, v1, v2) VALUES", "query = f\"SELECT record FROM {self.table_name} \" \\ f\"WHERE id='{identifier}';\" with psycopg2.connect(self.connection_info) as", "TABLE {self.error_table_name};\") conn.commit() def get_record(self, identifier): \"\"\" Selects the record of the job", "varchar({self._max_id_length}) PRIMARY KEY, \" f\" record varchar({self._max_record_length}) NOT NULL\" f\");\") conn.commit() def _delete_tables(self):", "psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) if cur.rowcount > 0: return", "conn: with conn.cursor() as cur: cur.execute(query) if cur.rowcount > 0: return cur.fetchone()[0] return", ":param identifier: (str) Identifier of the job \"\"\" query = f\"DELETE FROM {self.table_name}", "def get_all_records(self): \"\"\" :return: (dict) Dictionary of all job identifiers mapped to their", "conn: with conn.cursor() as cur: cur.execute(query) return cur.fetchone()[0] def count_failures(self): \"\"\" :return: (int)", "identifier, error_type='failure'): \"\"\" Inserts an entry into the table with a given identifier", "whether the record with this identifier is successful :param identifier: (str) Identifier of", "KEY, \" f\" record varchar({self._max_record_length}) NOT NULL\" f\");\") conn.commit() cur.execute(f\"CREATE TABLE IF NOT", "table_name: (str) Optional string name of the main db table. 
:param error_table_name: (str)", "= f\"SELECT record FROM {self.table_name} \" \\ f\"WHERE id='{identifier}';\" with psycopg2.connect(self.connection_info) as conn:", "specified by the given identifier from the database :param identifier: (str) Identifier of", "\\ \"WHERE record<>'success';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) failures", "error_type = error_type[:self._max_record_length] query = f\"INSERT INTO {self.table_name} \" \\ f\"VALUES ('{identifier}', '{error_type}');\"", "psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) failures = {} for (name,", "'success' return False def count_records(self): \"\"\" :return: (int) Number of records in the", "given identifier from the database :param identifier: (str) Identifier of the job \"\"\"", "table. :param error_table_name: (str) Optional string name for the errors db table. \"\"\"", "given identifier and erroneous record :param identifier: (str) Identifier of the job :param", "of \"dbname=<db_name> user=<user_name>' 'host=<host_name> password=<password>\"') conn.close() def _create_tables(self): \"\"\" Creates tables if they", "if cur.rowcount > 0: return cur.fetchone()[0] return None def get_all_records(self): \"\"\" :return: (dict)", "query = f\"SELECT id FROM {self.table_name} \" \\ \"WHERE record='success';\" with psycopg2.connect(self.connection_info) as", "def get_failed_runs(self): \"\"\" :return: (dict) Dictionary of error types mapped to lists of", "which record in them \"\"\" query = f\"SELECT id, record FROM {self.table_name} \"", "\"\"\" Deletes all entries from the table \"\"\" with psycopg2.connect(self.connection_info) as conn: with", "with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) return cur.fetchone()[0] def count_failures(self):", "with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) 
record_dict = {} for", "psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) record_dict = {} for (name,", "entry into the table with a given identifier and the record 'success' :param", "psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"SELECT COUNT(*) FROM {self.table_name};\") return cur.fetchone()[0]", "cur: cur.execute(f\"SELECT COUNT(*) FROM {self.table_name};\") return cur.fetchone()[0] def count_successes(self): \"\"\" :return: (int) Number", "successfully \"\"\" query = f\"SELECT record FROM {self.table_name} \" \\ f\"WHERE id='{identifier}';\" with", "as conn: with conn.cursor() as cur: cur.execute(f\"DELETE FROM {self.table_name};\") conn.commit() def ran_successfully(self, identifier):", "with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"SELECT COUNT(*) FROM {self.table_name};\") return", "successful :param identifier: (str) Identifier of the job record :return: (bool) Boolean on", "record<>'success';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) failures = {}", "psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"DELETE FROM {self.table_name};\") conn.commit() def ran_successfully(self,", "table_name=\"intake_records\", error_table_name=\"scan_errors\"): \"\"\" :param table_name: (str) Optional string name of the main db", "None def get_all_records(self): \"\"\" :return: (dict) Dictionary of all job identifiers mapped to", "\"\"\" :return: (dict) Dictionary of all job identifiers mapped to their respective records", "query = f\"SELECT * FROM {self.table_name}\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as", "identifier parsed and returns it :param identifier: (str) Identifier of the job record", "\"\"\" if self.get_record(identifier): self.delete_record(identifier) error_type = 
error_type[:self._max_record_length] query = f\"INSERT INTO {self.table_name} \"", "_max_id_length = 255 _max_record_length = 255 def __init__(self, table_name=\"intake_records\", error_table_name=\"scan_errors\"): \"\"\" :param table_name:", "cur.execute(query) if cur.rowcount > 0: return cur.fetchone()[0] return None def get_all_records(self): \"\"\" :return:", "with a given identifier and the record 'success' :param identifier: (str) Identifier of", "id varchar({self._max_id_length}) PRIMARY KEY, \" f\" record varchar({self._max_record_length}) NOT NULL\" f\");\") conn.commit() def", "(id, v1, v2) VALUES %s\", ... [(1, 2, 3), (4, 5, 6), (7,", "password=<password>\"') conn.close() def _create_tables(self): \"\"\" Creates tables if they don't already exist. \"\"\"", "psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"CREATE TABLE IF NOT EXISTS {self.table_name}", "for format of \"dbname=<db_name> user=<user_name>' 'host=<host_name> password=<password>\"') conn.close() def _create_tables(self): \"\"\" Creates tables", "into the table with a given identifier and the record 'success' :param identifier:", "to their respective records \"\"\" query = f\"SELECT * FROM {self.table_name}\" with psycopg2.connect(self.connection_info)", "as conn: with conn.cursor() as cur: cur.execute(query) record_dict = {} for (name, record)", "failures.setdefault(record, []) failures[record].append(name) return failures def delete_record(self, identifier): \"\"\" Deletes entry specified by", "job identifiers mapped to their respective records \"\"\" query = f\"SELECT * FROM", ":return: (int) Number of records in the table \"\"\" with psycopg2.connect(self.connection_info) as conn:", "as conn: with conn.cursor() as cur: cur.execute(query) if cur.rowcount > 0: return cur.fetchone()[0]", "failures[record].append(name) return failures def delete_record(self, identifier): \"\"\" Deletes entry specified by the given", "string is incorrect. 
Should be' 'in for format of \"dbname=<db_name> user=<user_name>' 'host=<host_name> password=<password>\"')", "PRIMARY KEY, \" f\" record varchar({self._max_record_length}) NOT NULL\" f\");\") conn.commit() def _delete_tables(self): \"\"\"", "IF NOT EXISTS {self.error_table_name} (\" f\" id varchar({self._max_id_length}) PRIMARY KEY, \" f\" record", "as conn: with conn.cursor() as cur: cur.execute(query) return [name[0] for name in cur]", "password=<password>\"') self._test_connection() self.table_name = table_name self.error_table_name = error_table_name self._create_tables() def _test_connection(self): try: conn", "{self.error_table_name};\") conn.commit() def get_record(self, identifier): \"\"\" Selects the record of the job with", "a list of the identifiers of all successful runs \"\"\" query = f\"SELECT", "as conn: with conn.cursor() as cur: cur.execute(f\"CREATE TABLE IF NOT EXISTS {self.table_name} (\"", "record :param identifier: (str) Identifier of the job :param error_type: (str) Record of", "> 0: return cur.fetchone()[0] return None def get_all_records(self): \"\"\" :return: (dict) Dictionary of", "psycopg2.connect(self.connection_info) except psycopg2.Error as err: print(err) raise ValueError('CEDA_INTAKE_DB_SETTINGS string is incorrect. 
Should be'", ":return: (int) Number of failed records in the table \"\"\" query = f\"SELECT", "if not self.connection_info: raise KeyError('Please create environment variable CEDA_INTAKE_DB_SETTINGS' 'in for format of", "EXISTS {self.table_name} (\" f\" id varchar({self._max_id_length}) PRIMARY KEY, \" f\" record varchar({self._max_record_length}) NOT", "identifier): \"\"\" Returns true / false on whether the record with this identifier", ":param identifier: (str) Identifier of the job record :return: (str) Record of job", "with conn.cursor() as cur: cur.execute(query) conn.commit() def insert_failure(self, identifier, error_type='failure'): \"\"\" Inserts an", "NULL\" f\");\") conn.commit() def _delete_tables(self): \"\"\" Drops the database tables \"\"\" with psycopg2.connect(self.connection_info)", "f\"SELECT id, record FROM {self.table_name} \" \\ \"WHERE record<>'success';\" with psycopg2.connect(self.connection_info) as conn:", "conn.commit() def delete_all_records(self): \"\"\" Deletes all entries from the table \"\"\" with psycopg2.connect(self.connection_info)", ":param table_name: (str) Optional string name of the main db table. 
:param error_table_name:", "varchar({self._max_record_length}) NOT NULL\" f\");\") conn.commit() def _delete_tables(self): \"\"\" Drops the database tables \"\"\"", "try: conn = psycopg2.connect(self.connection_info) except psycopg2.Error as err: print(err) raise ValueError('CEDA_INTAKE_DB_SETTINGS string is", "f\"SELECT id FROM {self.table_name} \" \\ \"WHERE record='success';\" with psycopg2.connect(self.connection_info) as conn: with", "\"\"\" if self.get_record(identifier): self.delete_record(identifier) query = f\"INSERT INTO {self.table_name} \" \\ f\"VALUES ('{identifier}',", "job \"\"\" if self.get_record(identifier): self.delete_record(identifier) query = f\"INSERT INTO {self.table_name} \" \\ f\"VALUES", "conn: with conn.cursor() as cur: cur.execute(f\"DROP TABLE {self.table_name};\") conn.commit() cur.execute(f\"DROP TABLE {self.error_table_name};\") conn.commit()", "with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query) conn.commit() def insert_failure(self, identifier,", "f\"SELECT COUNT(*) FROM {self.table_name} \" \\ \"WHERE record<>'success';\" with psycopg2.connect(self.connection_info) as conn: with", "DBHandler: _max_id_length = 255 _max_record_length = 255 def __init__(self, table_name=\"intake_records\", error_table_name=\"scan_errors\"): \"\"\" :param", "f\"SELECT * FROM {self.table_name}\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(query)", "it :param identifier: (str) Identifier of the job record :return: (str) Record of", "as err: print(err) raise ValueError('CEDA_INTAKE_DB_SETTINGS string is incorrect. 
Should be' 'in for format", "as conn: with conn.cursor() as cur: cur.execute(f\"SELECT COUNT(*) FROM {self.table_name};\") return cur.fetchone()[0] def", "{self.table_name} \" \\ \"WHERE record='success';\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur:", "as cur: cur.execute(f\"CREATE TABLE IF NOT EXISTS {self.table_name} (\" f\" id varchar({self._max_id_length}) PRIMARY", "\"\"\" with psycopg2.connect(self.connection_info) as conn: with conn.cursor() as cur: cur.execute(f\"SELECT COUNT(*) FROM {self.table_name};\")" ]
[ "json.dump(data, f) def load_cog(bot, cog): try: bot.load_extension(f'cogs.{cog}') print(f'Loaded extension {cog}.') return True except", "f: json.dump(data, f) def load_cog(bot, cog): try: bot.load_extension(f'cogs.{cog}') print(f'Loaded extension {cog}.') return True", "<filename>cogs/util/file_handling.py<gh_stars>0 import os, os.path import errno import json def mkdir_p(path): try: os.makedirs(path) except", "mkdir_p(path): try: os.makedirs(path) except OSError as e: if e.errno == errno.EEXIST and os.path.isdir(path):", "False def unload_cog(bot, cog): try: bot.unload_extension(f'cogs.{cog}') print(f'Unloaded extension {cog}.') return True except Exception", "errno import json def mkdir_p(path): try: os.makedirs(path) except OSError as e: if e.errno", "else: raise def load_json(path): data = {} try: with open(path) as f: data", "mkdir_p(os.path.dirname(path)) with open(path, 'w') as f: json.dump(data, f) def load_cog(bot, cog): try: bot.load_extension(f'cogs.{cog}')", "json.load(f) print(f'{path} successfully loaded') except: print('Could not load json') return data def save_json(path,", "import os, os.path import errno import json def mkdir_p(path): try: os.makedirs(path) except OSError", "{cog}.', file=sys.stderr) traceback.print_exc() return False def unload_cog(bot, cog): try: bot.unload_extension(f'cogs.{cog}') print(f'Unloaded extension {cog}.')", "return False def unload_cog(bot, cog): try: bot.unload_extension(f'cogs.{cog}') print(f'Unloaded extension {cog}.') return True except", "to load extension {cog}.', file=sys.stderr) traceback.print_exc() return False def unload_cog(bot, cog): try: bot.unload_extension(f'cogs.{cog}')", "def load_cog(bot, cog): try: bot.load_extension(f'cogs.{cog}') print(f'Loaded extension {cog}.') return True except Exception as", "print('Could not load json') return data def save_json(path, data): mkdir_p(os.path.dirname(path)) with open(path, 'w')", "True except Exception as e: print(f'Failed to unload extension {cog}.', 
file=sys.stderr) traceback.print_exc() return", "extension {cog}.') return True except Exception as e: print(f'Failed to unload extension {cog}.',", "def unload_cog(bot, cog): try: bot.unload_extension(f'cogs.{cog}') print(f'Unloaded extension {cog}.') return True except Exception as", "unload_cog(bot, cog): try: bot.unload_extension(f'cogs.{cog}') print(f'Unloaded extension {cog}.') return True except Exception as e:", "and os.path.isdir(path): pass else: raise def load_json(path): data = {} try: with open(path)", "as e: if e.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def load_json(path):", "bot.unload_extension(f'cogs.{cog}') print(f'Unloaded extension {cog}.') return True except Exception as e: print(f'Failed to unload", "= {} try: with open(path) as f: data = json.load(f) print(f'{path} successfully loaded')", "f: data = json.load(f) print(f'{path} successfully loaded') except: print('Could not load json') return", "data = json.load(f) print(f'{path} successfully loaded') except: print('Could not load json') return data", "try: os.makedirs(path) except OSError as e: if e.errno == errno.EEXIST and os.path.isdir(path): pass", "print(f'Unloaded extension {cog}.') return True except Exception as e: print(f'Failed to unload extension", "extension {cog}.', file=sys.stderr) traceback.print_exc() return False def unload_cog(bot, cog): try: bot.unload_extension(f'cogs.{cog}') print(f'Unloaded extension", "extension {cog}.') return True except Exception as e: print(f'Failed to load extension {cog}.',", "json def mkdir_p(path): try: os.makedirs(path) except OSError as e: if e.errno == errno.EEXIST", "load extension {cog}.', file=sys.stderr) traceback.print_exc() return False def unload_cog(bot, cog): try: bot.unload_extension(f'cogs.{cog}') print(f'Unloaded", "cog): try: bot.load_extension(f'cogs.{cog}') print(f'Loaded extension {cog}.') return True except Exception as e: print(f'Failed", "print(f'{path} successfully loaded') except: print('Could not 
load json') return data def save_json(path, data):", "not load json') return data def save_json(path, data): mkdir_p(os.path.dirname(path)) with open(path, 'w') as", "import json def mkdir_p(path): try: os.makedirs(path) except OSError as e: if e.errno ==", "json') return data def save_json(path, data): mkdir_p(os.path.dirname(path)) with open(path, 'w') as f: json.dump(data,", "with open(path, 'w') as f: json.dump(data, f) def load_cog(bot, cog): try: bot.load_extension(f'cogs.{cog}') print(f'Loaded", "errno.EEXIST and os.path.isdir(path): pass else: raise def load_json(path): data = {} try: with", "{} try: with open(path) as f: data = json.load(f) print(f'{path} successfully loaded') except:", "open(path) as f: data = json.load(f) print(f'{path} successfully loaded') except: print('Could not load", "as f: data = json.load(f) print(f'{path} successfully loaded') except: print('Could not load json')", "as e: print(f'Failed to load extension {cog}.', file=sys.stderr) traceback.print_exc() return False def unload_cog(bot,", "data = {} try: with open(path) as f: data = json.load(f) print(f'{path} successfully", "def save_json(path, data): mkdir_p(os.path.dirname(path)) with open(path, 'w') as f: json.dump(data, f) def load_cog(bot,", "e: print(f'Failed to load extension {cog}.', file=sys.stderr) traceback.print_exc() return False def unload_cog(bot, cog):", "raise def load_json(path): data = {} try: with open(path) as f: data =", "os.path.isdir(path): pass else: raise def load_json(path): data = {} try: with open(path) as", "save_json(path, data): mkdir_p(os.path.dirname(path)) with open(path, 'w') as f: json.dump(data, f) def load_cog(bot, cog):", "print(f'Loaded extension {cog}.') return True except Exception as e: print(f'Failed to load extension", "bot.load_extension(f'cogs.{cog}') print(f'Loaded extension {cog}.') return True except Exception as e: print(f'Failed to load", "if e.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def load_json(path): 
data =", "file=sys.stderr) traceback.print_exc() return False def unload_cog(bot, cog): try: bot.unload_extension(f'cogs.{cog}') print(f'Unloaded extension {cog}.') return", "os.makedirs(path) except OSError as e: if e.errno == errno.EEXIST and os.path.isdir(path): pass else:", "def load_json(path): data = {} try: with open(path) as f: data = json.load(f)", "{cog}.') return True except Exception as e: print(f'Failed to unload extension {cog}.', file=sys.stderr)", "e.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def load_json(path): data = {}", "try: with open(path) as f: data = json.load(f) print(f'{path} successfully loaded') except: print('Could", "os.path import errno import json def mkdir_p(path): try: os.makedirs(path) except OSError as e:", "def mkdir_p(path): try: os.makedirs(path) except OSError as e: if e.errno == errno.EEXIST and", "traceback.print_exc() return False def unload_cog(bot, cog): try: bot.unload_extension(f'cogs.{cog}') print(f'Unloaded extension {cog}.') return True", "loaded') except: print('Could not load json') return data def save_json(path, data): mkdir_p(os.path.dirname(path)) with", "pass else: raise def load_json(path): data = {} try: with open(path) as f:", "f) def load_cog(bot, cog): try: bot.load_extension(f'cogs.{cog}') print(f'Loaded extension {cog}.') return True except Exception", "OSError as e: if e.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def", "except: print('Could not load json') return data def save_json(path, data): mkdir_p(os.path.dirname(path)) with open(path,", "load_cog(bot, cog): try: bot.load_extension(f'cogs.{cog}') print(f'Loaded extension {cog}.') return True except Exception as e:", "try: bot.load_extension(f'cogs.{cog}') print(f'Loaded extension {cog}.') return True except Exception as e: print(f'Failed to", "load_json(path): data = {} try: with open(path) as f: data = json.load(f) print(f'{path}", "{cog}.') return True except Exception as e: print(f'Failed to load 
extension {cog}.', file=sys.stderr)", "except OSError as e: if e.errno == errno.EEXIST and os.path.isdir(path): pass else: raise", "== errno.EEXIST and os.path.isdir(path): pass else: raise def load_json(path): data = {} try:", "return data def save_json(path, data): mkdir_p(os.path.dirname(path)) with open(path, 'w') as f: json.dump(data, f)", "print(f'Failed to load extension {cog}.', file=sys.stderr) traceback.print_exc() return False def unload_cog(bot, cog): try:", "load json') return data def save_json(path, data): mkdir_p(os.path.dirname(path)) with open(path, 'w') as f:", "e: if e.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def load_json(path): data", "data): mkdir_p(os.path.dirname(path)) with open(path, 'w') as f: json.dump(data, f) def load_cog(bot, cog): try:", "data def save_json(path, data): mkdir_p(os.path.dirname(path)) with open(path, 'w') as f: json.dump(data, f) def", "as f: json.dump(data, f) def load_cog(bot, cog): try: bot.load_extension(f'cogs.{cog}') print(f'Loaded extension {cog}.') return", "return True except Exception as e: print(f'Failed to load extension {cog}.', file=sys.stderr) traceback.print_exc()", "try: bot.unload_extension(f'cogs.{cog}') print(f'Unloaded extension {cog}.') return True except Exception as e: print(f'Failed to", "Exception as e: print(f'Failed to load extension {cog}.', file=sys.stderr) traceback.print_exc() return False def", "open(path, 'w') as f: json.dump(data, f) def load_cog(bot, cog): try: bot.load_extension(f'cogs.{cog}') print(f'Loaded extension", "cog): try: bot.unload_extension(f'cogs.{cog}') print(f'Unloaded extension {cog}.') return True except Exception as e: print(f'Failed", "return True except Exception as e: print(f'Failed to unload extension {cog}.', file=sys.stderr) traceback.print_exc()", "with open(path) as f: data = json.load(f) print(f'{path} successfully loaded') except: print('Could not", "except Exception as e: print(f'Failed to load extension {cog}.', file=sys.stderr) 
traceback.print_exc() return False", "True except Exception as e: print(f'Failed to load extension {cog}.', file=sys.stderr) traceback.print_exc() return", "import errno import json def mkdir_p(path): try: os.makedirs(path) except OSError as e: if", "except Exception as e: print(f'Failed to unload extension {cog}.', file=sys.stderr) traceback.print_exc() return False", "os, os.path import errno import json def mkdir_p(path): try: os.makedirs(path) except OSError as", "= json.load(f) print(f'{path} successfully loaded') except: print('Could not load json') return data def", "successfully loaded') except: print('Could not load json') return data def save_json(path, data): mkdir_p(os.path.dirname(path))", "'w') as f: json.dump(data, f) def load_cog(bot, cog): try: bot.load_extension(f'cogs.{cog}') print(f'Loaded extension {cog}.')" ]
[ "(0., 0., 1., 1.), None, pytest.raises(ValueError)), ((0., 0., 1., 1.), (0., 0., 1.),", "None: with exception: result = box_iou_batch(boxes_true=boxes_true, boxes_detection=boxes_detection) np.testing.assert_array_equal(result, expected_result) QUARTER_MASK = np.zeros((10, 10)).astype('uint8')", "( np.array([ [0., 0.25, 1., 1.25] ]), None, None, pytest.raises(ValueError) ), ( np.array([", "@pytest.mark.parametrize( \"box_true, box_detection, expected_result, exception\", [ (None, None, None, pytest.raises(ValueError)), ((0., 0., 1.),", "exception: Exception ) -> None: with exception: result = box_iou(box_true=box_true, box_detection=box_detection) assert result", "1., 1.25] ]), None, pytest.raises(ValueError) ), ( np.array([ [0., 0.25, 1., 1.25] ]),", "[0., 0., 1., 1.], [2., 2., 2.5, 2.5] ]), np.array([ [1., 0.], [0.,", "DoesNotRaise()), (np.ones((10, 10)).astype('uint8'), QUARTER_MASK, 0.25, DoesNotRaise()) ] ) def test_mask_iou(mask_true: np.array, mask_detection: np.array,", "0., 1., 1.), (0., 1., 1., 2.), 0., DoesNotRaise()), ((0, 1., 1., 2.),", "1.), (0., 0., 1.), None, pytest.raises(ValueError)), ([0., 0., 1., 1.], [0., 0., 1.,", "pytest.raises(ValueError)), (np.ones((10, 10)).astype('int16'), np.zeros((10, 10)).astype('int16'), None, pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8') * 2, np.zeros((10, 10)).astype('uint8'),", "pytest.raises(ValueError) ), ( np.array([ [0., 0., 1., 1.], [2., 2., 2.5, 2.5] ]),", "expected_result, exception\", [ (None, None, None, pytest.raises(ValueError)), ((0., 0., 1.), (0., 0., 1.,", "0., 3., 3.), (1., 1., 2., 2.), 1/9, DoesNotRaise()), ((1., 1., 2., 2.),", "2., 2.5, 2.5] ]), np.array([ [1., 0.], [0., 1.] 
]), DoesNotRaise() ), (", "None: with exception: result = box_iou(box_true=box_true, box_detection=box_detection) assert result == expected_result @pytest.mark.parametrize( \"boxes_true,", "0., 1., 1.), (0., 0.25, 1., 1.25), 0.6, DoesNotRaise()), ((0., 0.25, 1., 1.25),", "(np.zeros((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 0., DoesNotRaise()), (np.zeros((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, DoesNotRaise()), (np.ones((10,", "1., DoesNotRaise()), ((0., 0., 3., 3.), (1., 1., 2., 2.), 1/9, DoesNotRaise()), ((1.,", "pytest.raises(ValueError) ), ( np.array([ [0., 0.25, 1., 1.25] ]), None, None, pytest.raises(ValueError) ),", "np.array, expected_result: float, exception: Exception) -> None: with exception: result = mask_iou(mask_true=mask_true, mask_detection=mask_detection)", "pytest.raises(ValueError)), (np.zeros((20, 20)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, pytest.raises(ValueError)), (np.ones((10, 10)).astype('int16'), np.zeros((10, 10)).astype('int16'), None, pytest.raises(ValueError)),", "None, pytest.raises(ValueError)), ([0., 0., 1., 1.], [0., 0., 1., 1.], None, pytest.raises(ValueError)), ((0.,", "DoesNotRaise()), (np.ones((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 1., DoesNotRaise()), (np.ones((10, 10)).astype('uint8'), QUARTER_MASK, 0.25, DoesNotRaise()) ]", "(0., 0., 1., 1.), 0.6, DoesNotRaise()), ((0., 0., 1., 1.), (0., 0., 1.,", "1., 2.), 0., DoesNotRaise()), ((0, 1., 1., 2.), (0., 0., 1., 1.), 0.,", "1., 1.), 1., DoesNotRaise()), ((0., 0., 3., 3.), (1., 1., 2., 2.), 1/9,", "def test_box_iou( box_true: Tuple[float, float, float, float], box_detection: Tuple[float, float, float, float], expected_result:", "0.25, 1., 1.25] ]), None, None, pytest.raises(ValueError) ), ( np.array([ [0., 0., 1.,", "10)).astype('uint8'), 0., pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), 0., DoesNotRaise()), (np.zeros((10, 
10)).astype('uint8'), np.ones((10, 10)).astype('uint8'),", "0., 1., 1.), None, pytest.raises(ValueError)), ((0., 0., 1., 1.), (0., 0., 1.), None,", "10)).astype('uint8'), QUARTER_MASK, 0.25, DoesNotRaise()) ] ) def test_mask_iou(mask_true: np.array, mask_detection: np.array, expected_result: float,", "0., 1.), (0., 0., 1., 1.), None, pytest.raises(ValueError)), ((0., 0., 1., 1.), (0.,", "1., 1.), (0.25, 0., 1.25, 1.), 0.6, DoesNotRaise()), ((0.25, 0., 1.25, 1.), (0.,", "np.ndarray, boxes_detection: np.ndarray, expected_result: Optional[float], exception: Exception ) -> None: with exception: result", "np.ndarray, expected_result: Optional[float], exception: Exception ) -> None: with exception: result = box_iou_batch(boxes_true=boxes_true,", "pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8') * 2, np.zeros((10, 10)).astype('uint8'), 0., pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'),", "expected_result, exception\", [ (None, None, None, pytest.raises(ValueError)), (np.zeros((10, 10)).astype('uint8'), np.zeros((20, 20)).astype('uint8'), None, pytest.raises(ValueError)),", "from contextlib import ExitStack as DoesNotRaise from typing import Tuple, Optional import numpy", "assert result == expected_result @pytest.mark.parametrize( \"boxes_true, boxes_detection, expected_result, exception\", [ ( None, np.array([", "np.array([ [0.6], [1/3] ]), DoesNotRaise() ), ( np.array([ [0., 0., 1., 1.], [0.,", "1.75], [1., 1., 2., 2.] 
]), np.array([ [0.6, 1/7, 0], [1/3, 1., 0]", "0., DoesNotRaise()), ((1., 0., 2., 1.), (0., 0., 1., 1.), 0., DoesNotRaise()), ((0.,", "[ (None, None, None, pytest.raises(ValueError)), (np.zeros((10, 10)).astype('uint8'), np.zeros((20, 20)).astype('uint8'), None, pytest.raises(ValueError)), (np.zeros((20, 20)).astype('uint8'),", "np.ones((10, 10)).astype('uint8'), 1., DoesNotRaise()), (np.ones((10, 10)).astype('uint8'), QUARTER_MASK, 0.25, DoesNotRaise()) ] ) def test_mask_iou(mask_true:", "1., 2., 2.] ]), np.array([ [0.6, 1/7, 0], [1/3, 1., 0] ]), DoesNotRaise()", "0., 1., 1.), 0., DoesNotRaise()), ((0., 0., 1., 1.), (0.25, 0., 1.25, 1.),", "1., 1.75], [1., 1., 2., 2.] ]), np.array([ [0.6, 1/7, 0], [1/3, 1.,", "boxes_true: np.ndarray, boxes_detection: np.ndarray, expected_result: Optional[float], exception: Exception ) -> None: with exception:", "1.), 0.6, DoesNotRaise()), ((0.25, 0., 1.25, 1.), (0., 0., 1., 1.), 0.6, DoesNotRaise()),", "(None, None, None, pytest.raises(ValueError)), (np.zeros((10, 10)).astype('uint8'), np.zeros((20, 20)).astype('uint8'), None, pytest.raises(ValueError)), (np.zeros((20, 20)).astype('uint8'), np.zeros((10,", "1., 1., 2.), (0., 0., 1., 1.), 0., DoesNotRaise()), ((0., 0., 1., 1.),", "0., 1., 1.), (0.25, 0., 1.25, 1.), 0.6, DoesNotRaise()), ((0.25, 0., 1.25, 1.),", "Tuple, Optional import numpy as np import pytest from onemetric.cv.utils.iou import box_iou, mask_iou,", ") ] ) def test_box_iou_batch( boxes_true: np.ndarray, boxes_detection: np.ndarray, expected_result: Optional[float], exception: Exception", "DoesNotRaise()), ((0., 0., 1., 1.), (0., 0.25, 1., 1.25), 0.6, DoesNotRaise()), ((0., 0.25,", "[2., 2., 2.5, 2.5] ]), np.array([ [0., 0., 1., 1.], [2., 2., 2.5,", "1.], [2., 2., 2.5, 2.5] ]), np.array([ [0., 0., 1., 1.], [2., 2.,", "1., 0] ]), DoesNotRaise() ) ] ) def test_box_iou_batch( boxes_true: np.ndarray, boxes_detection: np.ndarray,", "DoesNotRaise() ) ] ) def test_box_iou_batch( boxes_true: np.ndarray, boxes_detection: 
np.ndarray, expected_result: Optional[float], exception:", "0.6, DoesNotRaise()), ((0., 0.25, 1., 1.25), (0., 0., 1., 1.), 0.6, DoesNotRaise()), ((0.,", "(np.ones((10, 10)).astype('int16'), np.zeros((10, 10)).astype('int16'), None, pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8') * 2, np.zeros((10, 10)).astype('uint8'), 0.,", "[0., 0., 1., 1.], [2., 2., 2.5, 2.5] ]), np.array([ [0., 0., 1.,", "@pytest.mark.parametrize( \"boxes_true, boxes_detection, expected_result, exception\", [ ( None, np.array([ [0., 0.25, 1., 1.25]", "(np.zeros((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, DoesNotRaise()), (np.ones((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 1., DoesNotRaise()), (np.ones((10,", "import pytest from onemetric.cv.utils.iou import box_iou, mask_iou, box_iou_batch @pytest.mark.parametrize( \"box_true, box_detection, expected_result, exception\",", "1.), (0., 0., 1., 1.), 1., DoesNotRaise()), ((0., 0., 3., 3.), (1., 1.,", "test_box_iou( box_true: Tuple[float, float, float, float], box_detection: Tuple[float, float, float, float], expected_result: Optional[float],", "expected_result: Optional[float], exception: Exception ) -> None: with exception: result = box_iou(box_true=box_true, box_detection=box_detection)", "DoesNotRaise()), ((1., 0., 2., 1.), (0., 0., 1., 1.), 0., DoesNotRaise()), ((0., 0.,", "1., 1.25] ]), np.array([ [0.6], [1/3] ]), DoesNotRaise() ), ( np.array([ [0., 0.,", "[0., 0., 1., 1.], [0., 0.75, 1., 1.75] ]), np.array([ [0., 0.25, 1.,", "contextlib import ExitStack as DoesNotRaise from typing import Tuple, Optional import numpy as", "result == expected_result @pytest.mark.parametrize( \"boxes_true, boxes_detection, expected_result, exception\", [ ( None, np.array([ [0.,", "np.testing.assert_array_equal(result, expected_result) QUARTER_MASK = np.zeros((10, 10)).astype('uint8') QUARTER_MASK[0:5, 0:5] = 1 @pytest.mark.parametrize( \"mask_true, mask_detection,", "[ (None, None, None, 
pytest.raises(ValueError)), ((0., 0., 1.), (0., 0., 1., 1.), None,", "((0., 0., 3., 3.), (1., 1., 2., 2.), 1/9, DoesNotRaise()), ((1., 1., 2.,", ") def test_box_iou( box_true: Tuple[float, float, float, float], box_detection: Tuple[float, float, float, float],", "== expected_result @pytest.mark.parametrize( \"boxes_true, boxes_detection, expected_result, exception\", [ ( None, np.array([ [0., 0.25,", "]), np.array([ [1., 0.], [0., 1.] ]), DoesNotRaise() ), ( np.array([ [0., 0.,", "0., 1., 1.), (1., 0., 2., 1.), 0., DoesNotRaise()), ((1., 0., 2., 1.),", "((0., 0., 1., 1.), (0.25, 0., 1.25, 1.), 0.6, DoesNotRaise()), ((0.25, 0., 1.25,", "[0., 0.75, 1., 1.75] ]), np.array([ [0., 0.25, 1., 1.25] ]), np.array([ [0.6],", "10)).astype('uint8'), 0., DoesNotRaise()), (np.zeros((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 0., DoesNotRaise()), (np.zeros((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'),", "1.), (0., 0., 1., 1.), 0., DoesNotRaise()), ((0., 0., 1., 1.), (0.25, 0.,", "(np.zeros((10, 10)).astype('uint8'), np.zeros((20, 20)).astype('uint8'), None, pytest.raises(ValueError)), (np.zeros((20, 20)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, pytest.raises(ValueError)), (np.ones((10,", "result = box_iou(box_true=box_true, box_detection=box_detection) assert result == expected_result @pytest.mark.parametrize( \"boxes_true, boxes_detection, expected_result, exception\",", "DoesNotRaise from typing import Tuple, Optional import numpy as np import pytest from", "boxes_detection=boxes_detection) np.testing.assert_array_equal(result, expected_result) QUARTER_MASK = np.zeros((10, 10)).astype('uint8') QUARTER_MASK[0:5, 0:5] = 1 @pytest.mark.parametrize( \"mask_true,", "0., pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), 0., DoesNotRaise()), (np.zeros((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 0.,", "box_detection: Tuple[float, float, float, float], 
expected_result: Optional[float], exception: Exception ) -> None: with", "1.25), (0., 0., 1., 1.), 0.6, DoesNotRaise()), ((0., 0., 1., 1.), (0., 0.,", "1.75] ]), np.array([ [0., 0.25, 1., 1.25], [0., 0.75, 1., 1.75], [1., 1.,", "[0., 0.25, 1., 1.25], [0., 0.75, 1., 1.75], [1., 1., 2., 2.] ]),", "10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 1., DoesNotRaise()), (np.ones((10, 10)).astype('uint8'), QUARTER_MASK, 0.25, DoesNotRaise()) ] ) def", "(1., 1., 2., 2.), 1/9, DoesNotRaise()), ((1., 1., 2., 2.), (0., 0., 3.,", "0., DoesNotRaise()), ((0, 1., 1., 2.), (0., 0., 1., 1.), 0., DoesNotRaise()), ((0.,", "np.array([ [0., 0., 1., 1.], [2., 2., 2.5, 2.5] ]), np.array([ [1., 0.],", "DoesNotRaise()), ((0, 1., 1., 2.), (0., 0., 1., 1.), 0., DoesNotRaise()), ((0., 0.,", "1.], [0., 0.75, 1., 1.75] ]), np.array([ [0., 0.25, 1., 1.25] ]), np.array([", "np.array([ [0., 0.25, 1., 1.25] ]), np.array([ [0.6], [1/3] ]), DoesNotRaise() ), (", "2.5] ]), np.array([ [0., 0., 1., 1.], [2., 2., 2.5, 2.5] ]), np.array([", "(np.ones((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 1., DoesNotRaise()), (np.ones((10, 10)).astype('uint8'), QUARTER_MASK, 0.25, DoesNotRaise()) ] )", "None, None, pytest.raises(ValueError)), ((0., 0., 1.), (0., 0., 1., 1.), None, pytest.raises(ValueError)), ((0.,", "= 1 @pytest.mark.parametrize( \"mask_true, mask_detection, expected_result, exception\", [ (None, None, None, pytest.raises(ValueError)), (np.zeros((10,", "0., 1., 1.), (0., 0., 1., 1.), 1., DoesNotRaise()), ((0., 0., 3., 3.),", "1.), 0.6, DoesNotRaise()), ((0., 0., 1., 1.), (0., 0.25, 1., 1.25), 0.6, DoesNotRaise()),", "1., 1.], [0., 0., 1., 1.], None, pytest.raises(ValueError)), ((0., 0., 1., 1.), (0.,", "* 2, np.zeros((10, 10)).astype('uint8'), 0., pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), 0., DoesNotRaise()), (np.zeros((10,", "0., DoesNotRaise()), (np.zeros((10, 10)).astype('uint8'), np.zeros((10, 
10)).astype('uint8'), None, DoesNotRaise()), (np.ones((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 1.,", "1.), 0.6, DoesNotRaise()), ((0., 0., 1., 1.), (0., 0., 1., 1.), 1., DoesNotRaise()),", "0., 1., 1.], [0., 0., 1., 1.], None, pytest.raises(ValueError)), ((0., 0., 1., 1.),", "0., 1., 1.), (0., 0., 1.), None, pytest.raises(ValueError)), ([0., 0., 1., 1.], [0.,", "((0., 0., 1., 1.), (0., 1., 1., 2.), 0., DoesNotRaise()), ((0, 1., 1.,", "1., 1.], None, pytest.raises(ValueError)), ((0., 0., 1., 1.), (0., 1., 1., 2.), 0.,", "@pytest.mark.parametrize( \"mask_true, mask_detection, expected_result, exception\", [ (None, None, None, pytest.raises(ValueError)), (np.zeros((10, 10)).astype('uint8'), np.zeros((20,", "expected_result: Optional[float], exception: Exception ) -> None: with exception: result = box_iou_batch(boxes_true=boxes_true, boxes_detection=boxes_detection)", "test_mask_iou(mask_true: np.array, mask_detection: np.array, expected_result: float, exception: Exception) -> None: with exception: result", "0.25, DoesNotRaise()) ] ) def test_mask_iou(mask_true: np.array, mask_detection: np.array, expected_result: float, exception: Exception)", "0., 1., 1.), 0.6, DoesNotRaise()), ((0., 0., 1., 1.), (0., 0.25, 1., 1.25),", "1.), 0., DoesNotRaise()), ((1., 0., 2., 1.), (0., 0., 1., 1.), 0., DoesNotRaise()),", "1.), None, pytest.raises(ValueError)), ([0., 0., 1., 1.], [0., 0., 1., 1.], None, pytest.raises(ValueError)),", "( None, np.array([ [0., 0.25, 1., 1.25] ]), None, pytest.raises(ValueError) ), ( np.array([", "= box_iou(box_true=box_true, box_detection=box_detection) assert result == expected_result @pytest.mark.parametrize( \"boxes_true, boxes_detection, expected_result, exception\", [", "\"boxes_true, boxes_detection, expected_result, exception\", [ ( None, np.array([ [0., 0.25, 1., 1.25] ]),", "((0.25, 0., 1.25, 1.), (0., 0., 1., 1.), 0.6, DoesNotRaise()), ((0., 0., 1.,", "np.array([ [0., 0., 1., 1.], [2., 2., 2.5, 2.5] ]), np.array([ [0., 
0.,", "(0., 0., 3., 3.), 1/9, DoesNotRaise()) ] ) def test_box_iou( box_true: Tuple[float, float,", "), ( np.array([ [0., 0.25, 1., 1.25] ]), None, None, pytest.raises(ValueError) ), (", ") -> None: with exception: result = box_iou(box_true=box_true, box_detection=box_detection) assert result == expected_result", "1., 1.75] ]), np.array([ [0., 0.25, 1., 1.25] ]), np.array([ [0.6], [1/3] ]),", "np.array([ [0., 0.25, 1., 1.25] ]), None, None, pytest.raises(ValueError) ), ( np.array([ [0.,", "[0.6, 1/7, 0], [1/3, 1., 0] ]), DoesNotRaise() ) ] ) def test_box_iou_batch(", "box_iou(box_true=box_true, box_detection=box_detection) assert result == expected_result @pytest.mark.parametrize( \"boxes_true, boxes_detection, expected_result, exception\", [ (", "1., 1.], [2., 2., 2.5, 2.5] ]), np.array([ [1., 0.], [0., 1.] ]),", "[2., 2., 2.5, 2.5] ]), np.array([ [1., 0.], [0., 1.] ]), DoesNotRaise() ),", "from onemetric.cv.utils.iou import box_iou, mask_iou, box_iou_batch @pytest.mark.parametrize( \"box_true, box_detection, expected_result, exception\", [ (None,", "0., 1., 1.), 1., DoesNotRaise()), ((0., 0., 3., 3.), (1., 1., 2., 2.),", "0., 1., 1.], [2., 2., 2.5, 2.5] ]), np.array([ [0., 0., 1., 1.],", "1/9, DoesNotRaise()) ] ) def test_box_iou( box_true: Tuple[float, float, float, float], box_detection: Tuple[float,", "None, pytest.raises(ValueError)), ((0., 0., 1., 1.), (0., 0., 1.), None, pytest.raises(ValueError)), ([0., 0.,", "10)).astype('uint8'), 0., DoesNotRaise()), (np.zeros((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, DoesNotRaise()), (np.ones((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'),", "1., 1.25), (0., 0., 1., 1.), 0.6, DoesNotRaise()), ((0., 0., 1., 1.), (0.,", "box_true: Tuple[float, float, float, float], box_detection: Tuple[float, float, float, float], expected_result: Optional[float], exception:", "DoesNotRaise()), (np.zeros((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 0., DoesNotRaise()), 
(np.zeros((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, DoesNotRaise()),", "0., 1., 1.), 0., DoesNotRaise()), ((0., 0., 1., 1.), (1., 0., 2., 1.),", "ExitStack as DoesNotRaise from typing import Tuple, Optional import numpy as np import", "DoesNotRaise()), ((1., 1., 2., 2.), (0., 0., 3., 3.), 1/9, DoesNotRaise()) ] )", "exception: Exception ) -> None: with exception: result = box_iou_batch(boxes_true=boxes_true, boxes_detection=boxes_detection) np.testing.assert_array_equal(result, expected_result)", "with exception: result = box_iou_batch(boxes_true=boxes_true, boxes_detection=boxes_detection) np.testing.assert_array_equal(result, expected_result) QUARTER_MASK = np.zeros((10, 10)).astype('uint8') QUARTER_MASK[0:5,", "as np import pytest from onemetric.cv.utils.iou import box_iou, mask_iou, box_iou_batch @pytest.mark.parametrize( \"box_true, box_detection,", "QUARTER_MASK[0:5, 0:5] = 1 @pytest.mark.parametrize( \"mask_true, mask_detection, expected_result, exception\", [ (None, None, None,", "(0., 0., 1., 1.), 1., DoesNotRaise()), ((0., 0., 3., 3.), (1., 1., 2.,", "Exception ) -> None: with exception: result = box_iou(box_true=box_true, box_detection=box_detection) assert result ==", "1.], None, pytest.raises(ValueError)), ((0., 0., 1., 1.), (0., 1., 1., 2.), 0., DoesNotRaise()),", "1 @pytest.mark.parametrize( \"mask_true, mask_detection, expected_result, exception\", [ (None, None, None, pytest.raises(ValueError)), (np.zeros((10, 10)).astype('uint8'),", "0.6, DoesNotRaise()), ((0., 0., 1., 1.), (0., 0.25, 1., 1.25), 0.6, DoesNotRaise()), ((0.,", "1.), 0., DoesNotRaise()), ((0., 0., 1., 1.), (1., 0., 2., 1.), 0., DoesNotRaise()),", "10)).astype('uint8'), None, DoesNotRaise()), (np.ones((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 1., DoesNotRaise()), (np.ones((10, 10)).astype('uint8'), QUARTER_MASK, 0.25,", "[ ( None, np.array([ [0., 0.25, 1., 1.25] ]), None, pytest.raises(ValueError) ), (", "]), np.array([ [0., 0.25, 1., 
1.25] ]), np.array([ [0.6], [1/3] ]), DoesNotRaise() ),", "0] ]), DoesNotRaise() ) ] ) def test_box_iou_batch( boxes_true: np.ndarray, boxes_detection: np.ndarray, expected_result:", "np import pytest from onemetric.cv.utils.iou import box_iou, mask_iou, box_iou_batch @pytest.mark.parametrize( \"box_true, box_detection, expected_result,", "(np.ones((10, 10)).astype('uint8') * 2, np.zeros((10, 10)).astype('uint8'), 0., pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), 0.,", "((0., 0., 1., 1.), (0., 0.25, 1., 1.25), 0.6, DoesNotRaise()), ((0., 0.25, 1.,", "DoesNotRaise()), ((0., 0., 1., 1.), (1., 0., 2., 1.), 0., DoesNotRaise()), ((1., 0.,", "((0., 0., 1., 1.), (0., 0., 1.), None, pytest.raises(ValueError)), ([0., 0., 1., 1.],", "DoesNotRaise()), ((0., 0., 1., 1.), (0., 0., 1., 1.), 1., DoesNotRaise()), ((0., 0.,", "1., 1., 2.), 0., DoesNotRaise()), ((0, 1., 1., 2.), (0., 0., 1., 1.),", "3., 3.), 1/9, DoesNotRaise()) ] ) def test_box_iou( box_true: Tuple[float, float, float, float],", "np.zeros((10, 10)).astype('int16'), None, pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8') * 2, np.zeros((10, 10)).astype('uint8'), 0., pytest.raises(ValueError)), (np.ones((10,", "float], expected_result: Optional[float], exception: Exception ) -> None: with exception: result = box_iou(box_true=box_true,", "DoesNotRaise()), ((0., 0., 1., 1.), (0.25, 0., 1.25, 1.), 0.6, DoesNotRaise()), ((0.25, 0.,", "DoesNotRaise()), ((0.25, 0., 1.25, 1.), (0., 0., 1., 1.), 0.6, DoesNotRaise()), ((0., 0.,", "QUARTER_MASK = np.zeros((10, 10)).astype('uint8') QUARTER_MASK[0:5, 0:5] = 1 @pytest.mark.parametrize( \"mask_true, mask_detection, expected_result, exception\",", "0.75, 1., 1.75] ]), np.array([ [0., 0.25, 1., 1.25], [0., 0.75, 1., 1.75],", "] ) def test_box_iou( box_true: Tuple[float, float, float, float], box_detection: Tuple[float, float, float,", "10)).astype('uint8') * 2, np.zeros((10, 10)).astype('uint8'), 0., 
pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), 0., DoesNotRaise()),", "1.25], [0., 0.75, 1., 1.75], [1., 1., 2., 2.] ]), np.array([ [0.6, 1/7,", "1.), (0., 0., 1., 1.), None, pytest.raises(ValueError)), ((0., 0., 1., 1.), (0., 0.,", "2.), (0., 0., 3., 3.), 1/9, DoesNotRaise()) ] ) def test_box_iou( box_true: Tuple[float,", "0.25, 1., 1.25] ]), np.array([ [0.6], [1/3] ]), DoesNotRaise() ), ( np.array([ [0.,", "10)).astype('uint8'), None, pytest.raises(ValueError)), (np.ones((10, 10)).astype('int16'), np.zeros((10, 10)).astype('int16'), None, pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8') * 2,", "np.ones((10, 10)).astype('uint8'), 0., DoesNotRaise()), (np.zeros((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, DoesNotRaise()), (np.ones((10, 10)).astype('uint8'), np.ones((10,", "0:5] = 1 @pytest.mark.parametrize( \"mask_true, mask_detection, expected_result, exception\", [ (None, None, None, pytest.raises(ValueError)),", "float, float, float], expected_result: Optional[float], exception: Exception ) -> None: with exception: result", "(np.ones((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), 0., DoesNotRaise()), (np.zeros((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 0., DoesNotRaise()), (np.zeros((10,", ") -> None: with exception: result = box_iou_batch(boxes_true=boxes_true, boxes_detection=boxes_detection) np.testing.assert_array_equal(result, expected_result) QUARTER_MASK =", "] ) def test_box_iou_batch( boxes_true: np.ndarray, boxes_detection: np.ndarray, expected_result: Optional[float], exception: Exception )", "result = box_iou_batch(boxes_true=boxes_true, boxes_detection=boxes_detection) np.testing.assert_array_equal(result, expected_result) QUARTER_MASK = np.zeros((10, 10)).astype('uint8') QUARTER_MASK[0:5, 0:5] =", "1.), 0., DoesNotRaise()), ((0., 0., 1., 1.), (0.25, 0., 1.25, 1.), 0.6, DoesNotRaise()),", "[0., 0.25, 1., 1.25] ]), None, 
pytest.raises(ValueError) ), ( np.array([ [0., 0.25, 1.,", "0.75, 1., 1.75], [1., 1., 2., 2.] ]), np.array([ [0.6, 1/7, 0], [1/3,", "DoesNotRaise()) ] ) def test_box_iou( box_true: Tuple[float, float, float, float], box_detection: Tuple[float, float,", "None, pytest.raises(ValueError) ), ( np.array([ [0., 0., 1., 1.], [2., 2., 2.5, 2.5]", "exception\", [ (None, None, None, pytest.raises(ValueError)), (np.zeros((10, 10)).astype('uint8'), np.zeros((20, 20)).astype('uint8'), None, pytest.raises(ValueError)), (np.zeros((20,", "1.25] ]), None, pytest.raises(ValueError) ), ( np.array([ [0., 0.25, 1., 1.25] ]), None,", "expected_result: float, exception: Exception) -> None: with exception: result = mask_iou(mask_true=mask_true, mask_detection=mask_detection) assert", "float, float], box_detection: Tuple[float, float, float, float], expected_result: Optional[float], exception: Exception ) ->", "[0., 0.75, 1., 1.75], [1., 1., 2., 2.] ]), np.array([ [0.6, 1/7, 0],", "test_box_iou_batch( boxes_true: np.ndarray, boxes_detection: np.ndarray, expected_result: Optional[float], exception: Exception ) -> None: with", "None, None, pytest.raises(ValueError)), (np.zeros((10, 10)).astype('uint8'), np.zeros((20, 20)).astype('uint8'), None, pytest.raises(ValueError)), (np.zeros((20, 20)).astype('uint8'), np.zeros((10, 10)).astype('uint8'),", "10)).astype('int16'), np.zeros((10, 10)).astype('int16'), None, pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8') * 2, np.zeros((10, 10)).astype('uint8'), 0., pytest.raises(ValueError)),", "1.), None, pytest.raises(ValueError)), ((0., 0., 1., 1.), (0., 0., 1.), None, pytest.raises(ValueError)), ([0.,", "]), np.array([ [0.6, 1/7, 0], [1/3, 1., 0] ]), DoesNotRaise() ) ] )", "1., 1.), 0., DoesNotRaise()), ((0., 0., 1., 1.), (1., 0., 2., 1.), 0.,", "1., 1.25), 0.6, DoesNotRaise()), ((0., 0.25, 1., 1.25), (0., 0., 1., 1.), 0.6,", "np.array([ [0.6, 1/7, 0], [1/3, 1., 0] ]), DoesNotRaise() ) ] ) def", "[0., 1.] 
]), DoesNotRaise() ), ( np.array([ [0., 0., 1., 1.], [0., 0.75,", "with exception: result = box_iou(box_true=box_true, box_detection=box_detection) assert result == expected_result @pytest.mark.parametrize( \"boxes_true, boxes_detection,", "1.), 1., DoesNotRaise()), ((0., 0., 3., 3.), (1., 1., 2., 2.), 1/9, DoesNotRaise()),", "def test_box_iou_batch( boxes_true: np.ndarray, boxes_detection: np.ndarray, expected_result: Optional[float], exception: Exception ) -> None:", "0., 1., 1.], [0., 0.75, 1., 1.75] ]), np.array([ [0., 0.25, 1., 1.25],", "]), None, None, pytest.raises(ValueError) ), ( np.array([ [0., 0., 1., 1.], [2., 2.,", "exception: result = box_iou(box_true=box_true, box_detection=box_detection) assert result == expected_result @pytest.mark.parametrize( \"boxes_true, boxes_detection, expected_result,", "0.25, 1., 1.25), (0., 0., 1., 1.), 0.6, DoesNotRaise()), ((0., 0., 1., 1.),", "onemetric.cv.utils.iou import box_iou, mask_iou, box_iou_batch @pytest.mark.parametrize( \"box_true, box_detection, expected_result, exception\", [ (None, None,", "20)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, pytest.raises(ValueError)), (np.ones((10, 10)).astype('int16'), np.zeros((10, 10)).astype('int16'), None, pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8')", "pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), 0., DoesNotRaise()), (np.zeros((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 0., DoesNotRaise()),", "pytest.raises(ValueError)), ([0., 0., 1., 1.], [0., 0., 1., 1.], None, pytest.raises(ValueError)), ((0., 0.,", "float, exception: Exception) -> None: with exception: result = mask_iou(mask_true=mask_true, mask_detection=mask_detection) assert result", "\"mask_true, mask_detection, expected_result, exception\", [ (None, None, None, pytest.raises(ValueError)), (np.zeros((10, 10)).astype('uint8'), np.zeros((20, 20)).astype('uint8'),", "np.zeros((10, 10)).astype('uint8'), None, 
pytest.raises(ValueError)), (np.ones((10, 10)).astype('int16'), np.zeros((10, 10)).astype('int16'), None, pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8') *", "None, pytest.raises(ValueError)), (np.zeros((10, 10)).astype('uint8'), np.zeros((20, 20)).astype('uint8'), None, pytest.raises(ValueError)), (np.zeros((20, 20)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None,", "), ( np.array([ [0., 0., 1., 1.], [0., 0.75, 1., 1.75] ]), np.array([", "mask_detection, expected_result, exception\", [ (None, None, None, pytest.raises(ValueError)), (np.zeros((10, 10)).astype('uint8'), np.zeros((20, 20)).astype('uint8'), None,", "0., 1.), None, pytest.raises(ValueError)), ([0., 0., 1., 1.], [0., 0., 1., 1.], None,", "10)).astype('uint8') QUARTER_MASK[0:5, 0:5] = 1 @pytest.mark.parametrize( \"mask_true, mask_detection, expected_result, exception\", [ (None, None,", "]), np.array([ [0.6], [1/3] ]), DoesNotRaise() ), ( np.array([ [0., 0., 1., 1.],", "Optional[float], exception: Exception ) -> None: with exception: result = box_iou_batch(boxes_true=boxes_true, boxes_detection=boxes_detection) np.testing.assert_array_equal(result,", "exception: Exception) -> None: with exception: result = mask_iou(mask_true=mask_true, mask_detection=mask_detection) assert result ==", "1.25, 1.), (0., 0., 1., 1.), 0.6, DoesNotRaise()), ((0., 0., 1., 1.), (0.,", "0.25, 1., 1.25), 0.6, DoesNotRaise()), ((0., 0.25, 1., 1.25), (0., 0., 1., 1.),", "0., DoesNotRaise()), (np.zeros((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 0., DoesNotRaise()), (np.zeros((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None,", "None, np.array([ [0., 0.25, 1., 1.25] ]), None, pytest.raises(ValueError) ), ( np.array([ [0.,", "1.25, 1.), 0.6, DoesNotRaise()), ((0.25, 0., 1.25, 1.), (0., 0., 1., 1.), 0.6,", "]), np.array([ [0., 0.25, 1., 1.25], [0., 0.75, 1., 1.75], [1., 1., 2.,", "2., 2.] 
]), np.array([ [0.6, 1/7, 0], [1/3, 1., 0] ]), DoesNotRaise() )", "exception: result = box_iou_batch(boxes_true=boxes_true, boxes_detection=boxes_detection) np.testing.assert_array_equal(result, expected_result) QUARTER_MASK = np.zeros((10, 10)).astype('uint8') QUARTER_MASK[0:5, 0:5]", ") def test_mask_iou(mask_true: np.array, mask_detection: np.array, expected_result: float, exception: Exception) -> None: with", "as DoesNotRaise from typing import Tuple, Optional import numpy as np import pytest", "(0., 0., 1., 1.), 0., DoesNotRaise()), ((0., 0., 1., 1.), (0.25, 0., 1.25,", "2., 2.), (0., 0., 3., 3.), 1/9, DoesNotRaise()) ] ) def test_box_iou( box_true:", "-> None: with exception: result = box_iou_batch(boxes_true=boxes_true, boxes_detection=boxes_detection) np.testing.assert_array_equal(result, expected_result) QUARTER_MASK = np.zeros((10,", "0.], [0., 1.] ]), DoesNotRaise() ), ( np.array([ [0., 0., 1., 1.], [0.,", "= np.zeros((10, 10)).astype('uint8') QUARTER_MASK[0:5, 0:5] = 1 @pytest.mark.parametrize( \"mask_true, mask_detection, expected_result, exception\", [", "((0., 0.25, 1., 1.25), (0., 0., 1., 1.), 0.6, DoesNotRaise()), ((0., 0., 1.,", "1., DoesNotRaise()), (np.ones((10, 10)).astype('uint8'), QUARTER_MASK, 0.25, DoesNotRaise()) ] ) def test_mask_iou(mask_true: np.array, mask_detection:", "None, pytest.raises(ValueError)), ((0., 0., 1.), (0., 0., 1., 1.), None, pytest.raises(ValueError)), ((0., 0.,", "3.), 1/9, DoesNotRaise()) ] ) def test_box_iou( box_true: Tuple[float, float, float, float], box_detection:", "expected_result @pytest.mark.parametrize( \"boxes_true, boxes_detection, expected_result, exception\", [ ( None, np.array([ [0., 0.25, 1.,", "box_iou_batch(boxes_true=boxes_true, boxes_detection=boxes_detection) np.testing.assert_array_equal(result, expected_result) QUARTER_MASK = np.zeros((10, 10)).astype('uint8') QUARTER_MASK[0:5, 0:5] = 1 @pytest.mark.parametrize(", "(np.zeros((20, 20)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, 
pytest.raises(ValueError)), (np.ones((10, 10)).astype('int16'), np.zeros((10, 10)).astype('int16'), None, pytest.raises(ValueError)), (np.ones((10,", "import Tuple, Optional import numpy as np import pytest from onemetric.cv.utils.iou import box_iou,", "2.5] ]), np.array([ [1., 0.], [0., 1.] ]), DoesNotRaise() ), ( np.array([ [0.,", "0., 1., 1.), 0.6, DoesNotRaise()), ((0., 0., 1., 1.), (0., 0., 1., 1.),", "pytest from onemetric.cv.utils.iou import box_iou, mask_iou, box_iou_batch @pytest.mark.parametrize( \"box_true, box_detection, expected_result, exception\", [", "typing import Tuple, Optional import numpy as np import pytest from onemetric.cv.utils.iou import", "box_iou_batch @pytest.mark.parametrize( \"box_true, box_detection, expected_result, exception\", [ (None, None, None, pytest.raises(ValueError)), ((0., 0.,", "(0., 0., 1., 1.), 0., DoesNotRaise()), ((0., 0., 1., 1.), (1., 0., 2.,", "20)).astype('uint8'), None, pytest.raises(ValueError)), (np.zeros((20, 20)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, pytest.raises(ValueError)), (np.ones((10, 10)).astype('int16'), np.zeros((10, 10)).astype('int16'),", "10)).astype('uint8'), 1., DoesNotRaise()), (np.ones((10, 10)).astype('uint8'), QUARTER_MASK, 0.25, DoesNotRaise()) ] ) def test_mask_iou(mask_true: np.array,", "Exception) -> None: with exception: result = mask_iou(mask_true=mask_true, mask_detection=mask_detection) assert result == expected_result", "1.75] ]), np.array([ [0., 0.25, 1., 1.25] ]), np.array([ [0.6], [1/3] ]), DoesNotRaise()", "mask_iou, box_iou_batch @pytest.mark.parametrize( \"box_true, box_detection, expected_result, exception\", [ (None, None, None, pytest.raises(ValueError)), ((0.,", "0., DoesNotRaise()), ((0., 0., 1., 1.), (0.25, 0., 1.25, 1.), 0.6, DoesNotRaise()), ((0.25,", "exception\", [ ( None, np.array([ [0., 0.25, 1., 1.25] ]), None, pytest.raises(ValueError) ),", "[1., 1., 2., 2.] 
]), np.array([ [0.6, 1/7, 0], [1/3, 1., 0] ]),", "(1., 0., 2., 1.), 0., DoesNotRaise()), ((1., 0., 2., 1.), (0., 0., 1.,", "1.), (0., 0.25, 1., 1.25), 0.6, DoesNotRaise()), ((0., 0.25, 1., 1.25), (0., 0.,", "np.array([ [0., 0.25, 1., 1.25] ]), None, pytest.raises(ValueError) ), ( np.array([ [0., 0.25,", "0., DoesNotRaise()), ((0., 0., 1., 1.), (1., 0., 2., 1.), 0., DoesNotRaise()), ((1.,", "1.), (0.25, 0., 1.25, 1.), 0.6, DoesNotRaise()), ((0.25, 0., 1.25, 1.), (0., 0.,", "3., 3.), (1., 1., 2., 2.), 1/9, DoesNotRaise()), ((1., 1., 2., 2.), (0.,", "1., 1.), 0.6, DoesNotRaise()), ((0., 0., 1., 1.), (0., 0.25, 1., 1.25), 0.6,", "None, pytest.raises(ValueError)), (np.ones((10, 10)).astype('int16'), np.zeros((10, 10)).astype('int16'), None, pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8') * 2, np.zeros((10,", "mask_detection: np.array, expected_result: float, exception: Exception) -> None: with exception: result = mask_iou(mask_true=mask_true,", "1., 1.), 0.6, DoesNotRaise()), ((0., 0., 1., 1.), (0., 0., 1., 1.), 1.,", "np.zeros((10, 10)).astype('uint8'), 0., pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), 0., DoesNotRaise()), (np.zeros((10, 10)).astype('uint8'), np.ones((10,", "1/9, DoesNotRaise()), ((1., 1., 2., 2.), (0., 0., 3., 3.), 1/9, DoesNotRaise()) ]", "1.), (1., 0., 2., 1.), 0., DoesNotRaise()), ((1., 0., 2., 1.), (0., 0.,", "DoesNotRaise() ), ( np.array([ [0., 0., 1., 1.], [0., 0.75, 1., 1.75] ]),", "np.zeros((10, 10)).astype('uint8') QUARTER_MASK[0:5, 0:5] = 1 @pytest.mark.parametrize( \"mask_true, mask_detection, expected_result, exception\", [ (None,", "((0, 1., 1., 2.), (0., 0., 1., 1.), 0., DoesNotRaise()), ((0., 0., 1.,", "-> None: with exception: result = box_iou(box_true=box_true, box_detection=box_detection) assert result == expected_result @pytest.mark.parametrize(", "1., 1.25] ]), None, None, pytest.raises(ValueError) ), ( np.array([ [0., 0., 1., 1.],", "10)).astype('uint8'), 
np.zeros((10, 10)).astype('uint8'), 0., DoesNotRaise()), (np.zeros((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 0., DoesNotRaise()), (np.zeros((10, 10)).astype('uint8'),", "0.6, DoesNotRaise()), ((0., 0., 1., 1.), (0., 0., 1., 1.), 1., DoesNotRaise()), ((0.,", "Tuple[float, float, float, float], box_detection: Tuple[float, float, float, float], expected_result: Optional[float], exception: Exception", "( np.array([ [0., 0., 1., 1.], [2., 2., 2.5, 2.5] ]), np.array([ [0.,", "[0., 0., 1., 1.], None, pytest.raises(ValueError)), ((0., 0., 1., 1.), (0., 1., 1.,", "0., 2., 1.), 0., DoesNotRaise()), ((1., 0., 2., 1.), (0., 0., 1., 1.),", "(0.25, 0., 1.25, 1.), 0.6, DoesNotRaise()), ((0.25, 0., 1.25, 1.), (0., 0., 1.,", "1.] ]), DoesNotRaise() ), ( np.array([ [0., 0., 1., 1.], [0., 0.75, 1.,", "0.6, DoesNotRaise()), ((0.25, 0., 1.25, 1.), (0., 0., 1., 1.), 0.6, DoesNotRaise()), ((0.,", "None, pytest.raises(ValueError)), ((0., 0., 1., 1.), (0., 1., 1., 2.), 0., DoesNotRaise()), ((0,", "Optional[float], exception: Exception ) -> None: with exception: result = box_iou(box_true=box_true, box_detection=box_detection) assert", "boxes_detection: np.ndarray, expected_result: Optional[float], exception: Exception ) -> None: with exception: result =", "1., 1.), (0., 0., 1., 1.), 1., DoesNotRaise()), ((0., 0., 3., 3.), (1.,", "]), np.array([ [0., 0., 1., 1.], [2., 2., 2.5, 2.5] ]), np.array([ [1.,", "(np.ones((10, 10)).astype('uint8'), QUARTER_MASK, 0.25, DoesNotRaise()) ] ) def test_mask_iou(mask_true: np.array, mask_detection: np.array, expected_result:", "1.), (0., 0., 1., 1.), 0.6, DoesNotRaise()), ((0., 0., 1., 1.), (0., 0.25,", "QUARTER_MASK, 0.25, DoesNotRaise()) ] ) def test_mask_iou(mask_true: np.array, mask_detection: np.array, expected_result: float, exception:", "2.5, 2.5] ]), np.array([ [0., 0., 1., 1.], [2., 2., 2.5, 2.5] ]),", "np.array([ [1., 0.], [0., 1.] 
]), DoesNotRaise() ), ( np.array([ [0., 0., 1.,", "pytest.raises(ValueError)), ((0., 0., 1., 1.), (0., 1., 1., 2.), 0., DoesNotRaise()), ((0, 1.,", "0., 1., 1.], None, pytest.raises(ValueError)), ((0., 0., 1., 1.), (0., 1., 1., 2.),", "None, pytest.raises(ValueError)), (np.zeros((20, 20)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, pytest.raises(ValueError)), (np.ones((10, 10)).astype('int16'), np.zeros((10, 10)).astype('int16'), None,", "pytest.raises(ValueError)), ((0., 0., 1., 1.), (0., 0., 1.), None, pytest.raises(ValueError)), ([0., 0., 1.,", "float], box_detection: Tuple[float, float, float, float], expected_result: Optional[float], exception: Exception ) -> None:", "2, np.zeros((10, 10)).astype('uint8'), 0., pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), 0., DoesNotRaise()), (np.zeros((10, 10)).astype('uint8'),", "0.25, 1., 1.25], [0., 0.75, 1., 1.75], [1., 1., 2., 2.] ]), np.array([", "\"box_true, box_detection, expected_result, exception\", [ (None, None, None, pytest.raises(ValueError)), ((0., 0., 1.), (0.,", "np.zeros((10, 10)).astype('uint8'), None, DoesNotRaise()), (np.ones((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 1., DoesNotRaise()), (np.ones((10, 10)).astype('uint8'), QUARTER_MASK,", "1.25), 0.6, DoesNotRaise()), ((0., 0.25, 1., 1.25), (0., 0., 1., 1.), 0.6, DoesNotRaise()),", "DoesNotRaise()), ((0., 0., 3., 3.), (1., 1., 2., 2.), 1/9, DoesNotRaise()), ((1., 1.,", "1.25] ]), np.array([ [0.6], [1/3] ]), DoesNotRaise() ), ( np.array([ [0., 0., 1.,", "np.zeros((10, 10)).astype('uint8'), 0., DoesNotRaise()), (np.zeros((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 0., DoesNotRaise()), (np.zeros((10, 10)).astype('uint8'), np.zeros((10,", "((0., 0., 1.), (0., 0., 1., 1.), None, pytest.raises(ValueError)), ((0., 0., 1., 1.),", "((1., 0., 2., 1.), (0., 0., 1., 1.), 0., DoesNotRaise()), ((0., 0., 1.,", "0.75, 1., 1.75] ]), np.array([ [0., 0.25, 1., 1.25] 
]), np.array([ [0.6], [1/3]", "(0., 1., 1., 2.), 0., DoesNotRaise()), ((0, 1., 1., 2.), (0., 0., 1.,", "float, float], expected_result: Optional[float], exception: Exception ) -> None: with exception: result =", "0., 1., 1.], [2., 2., 2.5, 2.5] ]), np.array([ [1., 0.], [0., 1.]", "None, pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8') * 2, np.zeros((10, 10)).astype('uint8'), 0., pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8'), np.zeros((10,", "Optional import numpy as np import pytest from onemetric.cv.utils.iou import box_iou, mask_iou, box_iou_batch", "]), None, pytest.raises(ValueError) ), ( np.array([ [0., 0.25, 1., 1.25] ]), None, None,", "2.), 0., DoesNotRaise()), ((0, 1., 1., 2.), (0., 0., 1., 1.), 0., DoesNotRaise()),", "1.25] ]), None, None, pytest.raises(ValueError) ), ( np.array([ [0., 0., 1., 1.], [2.,", "[0., 0.25, 1., 1.25] ]), np.array([ [0.6], [1/3] ]), DoesNotRaise() ), ( np.array([", "2., 1.), 0., DoesNotRaise()), ((1., 0., 2., 1.), (0., 0., 1., 1.), 0.,", "boxes_detection, expected_result, exception\", [ ( None, np.array([ [0., 0.25, 1., 1.25] ]), None,", "pytest.raises(ValueError)), ((0., 0., 1.), (0., 0., 1., 1.), None, pytest.raises(ValueError)), ((0., 0., 1.,", "np.zeros((20, 20)).astype('uint8'), None, pytest.raises(ValueError)), (np.zeros((20, 20)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, pytest.raises(ValueError)), (np.ones((10, 10)).astype('int16'), np.zeros((10,", "1., 2., 2.), (0., 0., 3., 3.), 1/9, DoesNotRaise()) ] ) def test_box_iou(", "DoesNotRaise()), ((0., 0.25, 1., 1.25), (0., 0., 1., 1.), 0.6, DoesNotRaise()), ((0., 0.,", "[1/3, 1., 0] ]), DoesNotRaise() ) ] ) def test_box_iou_batch( boxes_true: np.ndarray, boxes_detection:", "1., 2.), (0., 0., 1., 1.), 0., DoesNotRaise()), ((0., 0., 1., 1.), (1.,", "1., 1.), None, pytest.raises(ValueError)), ((0., 0., 1., 1.), (0., 0., 1.), None, pytest.raises(ValueError)),", "np.array([ [0., 0., 1., 1.], [0., 0.75, 1., 1.75] ]), np.array([ [0., 
0.25,", "1/7, 0], [1/3, 1., 0] ]), DoesNotRaise() ) ] ) def test_box_iou_batch( boxes_true:", "(None, None, None, pytest.raises(ValueError)), ((0., 0., 1.), (0., 0., 1., 1.), None, pytest.raises(ValueError)),", "1.], [0., 0.75, 1., 1.75] ]), np.array([ [0., 0.25, 1., 1.25], [0., 0.75,", "pytest.raises(ValueError)), (np.zeros((10, 10)).astype('uint8'), np.zeros((20, 20)).astype('uint8'), None, pytest.raises(ValueError)), (np.zeros((20, 20)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, pytest.raises(ValueError)),", "1., 1.], [2., 2., 2.5, 2.5] ]), np.array([ [0., 0., 1., 1.], [2.,", "[0., 0.75, 1., 1.75] ]), np.array([ [0., 0.25, 1., 1.25], [0., 0.75, 1.,", "[1., 0.], [0., 1.] ]), DoesNotRaise() ), ( np.array([ [0., 0., 1., 1.],", "2., 2.), 1/9, DoesNotRaise()), ((1., 1., 2., 2.), (0., 0., 3., 3.), 1/9,", "1., 1.75] ]), np.array([ [0., 0.25, 1., 1.25], [0., 0.75, 1., 1.75], [1.,", "1.), (0., 1., 1., 2.), 0., DoesNotRaise()), ((0, 1., 1., 2.), (0., 0.,", "[0., 0.25, 1., 1.25] ]), None, None, pytest.raises(ValueError) ), ( np.array([ [0., 0.,", "2.5, 2.5] ]), np.array([ [1., 0.], [0., 1.] 
]), DoesNotRaise() ), ( np.array([", "((1., 1., 2., 2.), (0., 0., 3., 3.), 1/9, DoesNotRaise()) ] ) def", "1., 1.], [0., 0.75, 1., 1.75] ]), np.array([ [0., 0.25, 1., 1.25] ]),", "None, None, pytest.raises(ValueError) ), ( np.array([ [0., 0., 1., 1.], [2., 2., 2.5,", "def test_mask_iou(mask_true: np.array, mask_detection: np.array, expected_result: float, exception: Exception) -> None: with exception:", "]), DoesNotRaise() ), ( np.array([ [0., 0., 1., 1.], [0., 0.75, 1., 1.75]", "np.array([ [0., 0.25, 1., 1.25], [0., 0.75, 1., 1.75], [1., 1., 2., 2.]", "1.], [0., 0., 1., 1.], None, pytest.raises(ValueError)), ((0., 0., 1., 1.), (0., 1.,", "2., 1.), (0., 0., 1., 1.), 0., DoesNotRaise()), ((0., 0., 1., 1.), (0.25,", "3.), (1., 1., 2., 2.), 1/9, DoesNotRaise()), ((1., 1., 2., 2.), (0., 0.,", "expected_result, exception\", [ ( None, np.array([ [0., 0.25, 1., 1.25] ]), None, pytest.raises(ValueError)", "1., 1.25], [0., 0.75, 1., 1.75], [1., 1., 2., 2.] ]), np.array([ [0.6,", "1., 1.), (0., 0.25, 1., 1.25), 0.6, DoesNotRaise()), ((0., 0.25, 1., 1.25), (0.,", "1., 1.), (0., 1., 1., 2.), 0., DoesNotRaise()), ((0, 1., 1., 2.), (0.,", "1., 1.), (0., 0., 1.), None, pytest.raises(ValueError)), ([0., 0., 1., 1.], [0., 0.,", "10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 0., DoesNotRaise()), (np.zeros((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, DoesNotRaise()), (np.ones((10, 10)).astype('uint8'),", "0., 3., 3.), 1/9, DoesNotRaise()) ] ) def test_box_iou( box_true: Tuple[float, float, float,", "1., 1.), 0., DoesNotRaise()), ((0., 0., 1., 1.), (0.25, 0., 1.25, 1.), 0.6,", "Exception ) -> None: with exception: result = box_iou_batch(boxes_true=boxes_true, boxes_detection=boxes_detection) np.testing.assert_array_equal(result, expected_result) QUARTER_MASK", "] ) def test_mask_iou(mask_true: np.array, mask_detection: np.array, expected_result: float, exception: Exception) -> None:", "), ( np.array([ [0., 0., 1., 1.], [2., 2., 2.5, 2.5] ]), 
np.array([", "0., 1.25, 1.), 0.6, DoesNotRaise()), ((0.25, 0., 1.25, 1.), (0., 0., 1., 1.),", "DoesNotRaise()), (np.zeros((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, DoesNotRaise()), (np.ones((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 1., DoesNotRaise()),", "0], [1/3, 1., 0] ]), DoesNotRaise() ) ] ) def test_box_iou_batch( boxes_true: np.ndarray,", "None, DoesNotRaise()), (np.ones((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 1., DoesNotRaise()), (np.ones((10, 10)).astype('uint8'), QUARTER_MASK, 0.25, DoesNotRaise())", "]), DoesNotRaise() ) ] ) def test_box_iou_batch( boxes_true: np.ndarray, boxes_detection: np.ndarray, expected_result: Optional[float],", "10)).astype('int16'), None, pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8') * 2, np.zeros((10, 10)).astype('uint8'), 0., pytest.raises(ValueError)), (np.ones((10, 10)).astype('uint8'),", "( np.array([ [0., 0., 1., 1.], [0., 0.75, 1., 1.75] ]), np.array([ [0.,", "expected_result) QUARTER_MASK = np.zeros((10, 10)).astype('uint8') QUARTER_MASK[0:5, 0:5] = 1 @pytest.mark.parametrize( \"mask_true, mask_detection, expected_result,", "box_iou, mask_iou, box_iou_batch @pytest.mark.parametrize( \"box_true, box_detection, expected_result, exception\", [ (None, None, None, pytest.raises(ValueError)),", "((0., 0., 1., 1.), (0., 0., 1., 1.), 1., DoesNotRaise()), ((0., 0., 3.,", "0., 1.25, 1.), (0., 0., 1., 1.), 0.6, DoesNotRaise()), ((0., 0., 1., 1.),", ") def test_box_iou_batch( boxes_true: np.ndarray, boxes_detection: np.ndarray, expected_result: Optional[float], exception: Exception ) ->", "10)).astype('uint8'), np.zeros((20, 20)).astype('uint8'), None, pytest.raises(ValueError)), (np.zeros((20, 20)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, pytest.raises(ValueError)), (np.ones((10, 10)).astype('int16'),", "None, pytest.raises(ValueError) ), ( np.array([ [0., 0.25, 1., 1.25] ]), None, None, pytest.raises(ValueError)", "import 
box_iou, mask_iou, box_iou_batch @pytest.mark.parametrize( \"box_true, box_detection, expected_result, exception\", [ (None, None, None,", "DoesNotRaise()) ] ) def test_mask_iou(mask_true: np.array, mask_detection: np.array, expected_result: float, exception: Exception) ->", "0.25, 1., 1.25] ]), None, pytest.raises(ValueError) ), ( np.array([ [0., 0.25, 1., 1.25]", "1.], [2., 2., 2.5, 2.5] ]), np.array([ [1., 0.], [0., 1.] ]), DoesNotRaise()", "((0., 0., 1., 1.), (1., 0., 2., 1.), 0., DoesNotRaise()), ((1., 0., 2.,", "2.), 1/9, DoesNotRaise()), ((1., 1., 2., 2.), (0., 0., 3., 3.), 1/9, DoesNotRaise())", "[1/3] ]), DoesNotRaise() ), ( np.array([ [0., 0., 1., 1.], [0., 0.75, 1.,", "(0., 0.25, 1., 1.25), 0.6, DoesNotRaise()), ((0., 0.25, 1., 1.25), (0., 0., 1.,", "from typing import Tuple, Optional import numpy as np import pytest from onemetric.cv.utils.iou", "1., 2., 2.), 1/9, DoesNotRaise()), ((1., 1., 2., 2.), (0., 0., 3., 3.),", "import ExitStack as DoesNotRaise from typing import Tuple, Optional import numpy as np", "float, float, float], box_detection: Tuple[float, float, float, float], expected_result: Optional[float], exception: Exception )", "10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, DoesNotRaise()), (np.ones((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 1., DoesNotRaise()), (np.ones((10, 10)).astype('uint8'),", "0., 1., 1.], [0., 0.75, 1., 1.75] ]), np.array([ [0., 0.25, 1., 1.25]", "1., 1.], [0., 0.75, 1., 1.75] ]), np.array([ [0., 0.25, 1., 1.25], [0.,", "box_detection, expected_result, exception\", [ (None, None, None, pytest.raises(ValueError)), ((0., 0., 1.), (0., 0.,", "numpy as np import pytest from onemetric.cv.utils.iou import box_iou, mask_iou, box_iou_batch @pytest.mark.parametrize( \"box_true,", "exception\", [ (None, None, None, pytest.raises(ValueError)), ((0., 0., 1.), (0., 0., 1., 1.),", "np.array, mask_detection: np.array, expected_result: float, exception: Exception) -> None: with exception: result =", 
"Tuple[float, float, float, float], expected_result: Optional[float], exception: Exception ) -> None: with exception:", "[0.6], [1/3] ]), DoesNotRaise() ), ( np.array([ [0., 0., 1., 1.], [0., 0.75,", "(0., 0., 1.), None, pytest.raises(ValueError)), ([0., 0., 1., 1.], [0., 0., 1., 1.],", "2.), (0., 0., 1., 1.), 0., DoesNotRaise()), ((0., 0., 1., 1.), (1., 0.,", "0., 2., 1.), (0., 0., 1., 1.), 0., DoesNotRaise()), ((0., 0., 1., 1.),", "import numpy as np import pytest from onemetric.cv.utils.iou import box_iou, mask_iou, box_iou_batch @pytest.mark.parametrize(", "1., 1.), (1., 0., 2., 1.), 0., DoesNotRaise()), ((1., 0., 2., 1.), (0.,", "(0., 0., 1., 1.), 0.6, DoesNotRaise()), ((0., 0., 1., 1.), (0., 0.25, 1.,", "2.] ]), np.array([ [0.6, 1/7, 0], [1/3, 1., 0] ]), DoesNotRaise() ) ]", "= box_iou_batch(boxes_true=boxes_true, boxes_detection=boxes_detection) np.testing.assert_array_equal(result, expected_result) QUARTER_MASK = np.zeros((10, 10)).astype('uint8') QUARTER_MASK[0:5, 0:5] = 1", "box_detection=box_detection) assert result == expected_result @pytest.mark.parametrize( \"boxes_true, boxes_detection, expected_result, exception\", [ ( None,", "2., 2.5, 2.5] ]), np.array([ [0., 0., 1., 1.], [2., 2., 2.5, 2.5]", "([0., 0., 1., 1.], [0., 0., 1., 1.], None, pytest.raises(ValueError)), ((0., 0., 1.," ]
[ "PCA: def initialization(self,X): X = np.array(X) return X def train(self,X): self.X = self.initialization(X)", "st.plotly_chart(fig) elif self.X.shape[1]==3: print(\"Incomplete\") else: st.error(\"Your data is in Higher Dimension state\") class", "class Kmeans: def initialize_var(self,X,K=3): X = np.array(X) m,n = X.shape c = np.random.randn(K,n)", "def assignment_move(self,X,c,K): m = X.shape[0] idx = np.zeros(m) for o in range(10): for", "if uploaded_file is not None: data = pd.read_csv(uploaded_file) st.write(data) if uploaded_file is not", "for i in range(m): temp = np.zeros(K) for j in range(K): temp[j] =", "s: k = i+k variance = k/sum_s self.variance_exp.append(variance) def K_components(self,n=2): self.X= np.dot(self.X,self.u[:,:n]) return", "= np.array(X) m,n = X.shape c = np.random.randn(K,n) return X,c,K def assignment_move(self,X,c,K): m", "c,data = d.test(X,k_value) st.subheader(\"Centroids\") st.write(c) st.subheader(\"Clustering Data with labels\") st.write(data) d.plot_clusters(data) #except :", "st.markdown(d.variance_explained()) st.info('Always Use Feature Normalization when applying PCA') X_pca = d.K_components(k_value) st.header('X :", "= k/sum_s self.variance_exp.append(variance) def K_components(self,n=2): self.X= np.dot(self.X,self.u[:,:n]) return self.X def variance_explained(self): return self.variance_exp", "pd.concat([X_,idx_],axis =1) return self.c,data def plot_clusters(self,d): a={} if self.X.shape[1]==2: for i in range(2):", "np.zeros(K) for j in range(K): temp[j] = np.sum((X[i,:] - c[j,:]) ** 2) idx[i]", "uploaded_file is not None: data = pd.read_csv(uploaded_file) st.write(data) if uploaded_file is not None:", "a['a1'] = np.reshape(a['a1'],(a['a1']).shape[0],) a['a2'] = np.reshape(a['a2'],(a['a2']).shape[0],) fig = go.Figure(data=go.Scatter(x=a['a1'], y=a['a2'], mode='markers', marker=dict(color=self.idx) ))", "np.cov(X.T) self.u,s,v = np.linalg.svd(self.covariance_matrix) sum_s = np.sum(s) self.variance_exp= [] k = 0 for", "of K 
value exceeds try again') if Algorithms == 'Principal Component Analysis': k_value", "st.sidebar.checkbox(\"Click Here for training\") if train_button: d = Kmeans() c,data = d.test(X,k_value) st.subheader(\"Centroids\")", "= np.linalg.svd(self.covariance_matrix) sum_s = np.sum(s) self.variance_exp= [] k = 0 for i in", "None: Algorithms = st.sidebar.selectbox( 'Algorithm', ('None','K-means Clustering','Principal Component Analysis') ) if uploaded_file is", "= np.random.randn(K,n) return X,c,K def assignment_move(self,X,c,K): m = X.shape[0] idx = np.zeros(m) for", "file\", type=\"csv\") if uploaded_file is not None: data = pd.read_csv(uploaded_file) st.write(data) if uploaded_file", "in range(K): points = [X[j] for j in range(len(X)) if idx[j] == p]", "X def train(self,X): self.X = self.initialization(X) self.covariance_matrix = np.cov(X.T) self.u,s,v = np.linalg.svd(self.covariance_matrix) sum_s", "= np.argmin(temp) for p in range(K): points = [X[j] for j in range(len(X))", "= np.cov(X.T) self.u,s,v = np.linalg.svd(self.covariance_matrix) sum_s = np.sum(s) self.variance_exp= [] k = 0", "np.mean(X))/np.std(X) st.header(\"X : Features (Normalized)\") st.write(X) class Kmeans: def initialize_var(self,X,K=3): X = np.array(X)", "Features (Selected will be dropped)', data.columns.to_list()) X = data.drop(drop_column,axis = 1) st.header('X :", ": Features (Selected will be dropped)', data.columns.to_list()) X = data.drop(drop_column,axis = 1) st.header('X", "as px st.title(\"Synapse Unsupervised Models\") uploaded_file = st.file_uploader(\"Choose a csv file\", type=\"csv\") if", "plotly.express as px st.title(\"Synapse Unsupervised Models\") uploaded_file = st.file_uploader(\"Choose a csv file\", type=\"csv\")", "exceeds try again') if Algorithms == 'Principal Component Analysis': k_value = st.sidebar.number_input('Enter K", "uploaded_file is not None: drop_column = st.sidebar.multiselect('X : Features (Selected will be dropped)',", "np.array(X) m,n = X.shape c = 
np.random.randn(K,n) return X,c,K def assignment_move(self,X,c,K): m =", "o in range(10): for i in range(m): temp = np.zeros(K) for j in", "in s: k = i+k variance = k/sum_s self.variance_exp.append(variance) def K_components(self,n=2): self.X= np.dot(self.X,self.u[:,:n])", "= st.sidebar.selectbox( 'Algorithm', ('None','K-means Clustering','Principal Component Analysis') ) if uploaded_file is not None:", "not None: if Algorithms == 'K-means Clustering': k_value = st.sidebar.number_input('Enter K value',value =", "return self.variance_exp if uploaded_file is not None: Algorithms = st.sidebar.selectbox( 'Algorithm', ('None','K-means Clustering','Principal", "a['a'+str(i+1)] = self.X[:,i:i+1] a['a1'] = np.reshape(a['a1'],(a['a1']).shape[0],) a['a2'] = np.reshape(a['a2'],(a['a2']).shape[0],) fig = go.Figure(data=go.Scatter(x=a['a1'], y=a['a2'],", "elif self.X.shape[1]==3: d.columns = ['x','y','z','l'] fig = px.scatter_3d(d, x='x', y='y', z='z',color = 'l')", "[X[j] for j in range(len(X)) if idx[j] == p] c[p] = np.mean(points, axis=0)", ": Features (Normalized)\") st.write(X) class Kmeans: def initialize_var(self,X,K=3): X = np.array(X) m,n =", "X = data.drop(drop_column,axis = 1) st.header('X : Features') st.write(X) if uploaded_file is not", "variance = k/sum_s self.variance_exp.append(variance) def K_components(self,n=2): self.X= np.dot(self.X,self.u[:,:n]) return self.X def variance_explained(self): return", "as pd import plotly.graph_objects as go import plotly.express as px st.title(\"Synapse Unsupervised Models\")", "for training\") if train_button: d = Kmeans() c,data = d.test(X,k_value) st.subheader(\"Centroids\") st.write(c) st.subheader(\"Clustering", "sum_s = np.sum(s) self.variance_exp= [] k = 0 for i in s: k", "range(m): temp = np.zeros(K) for j in range(K): temp[j] = np.sum((X[i,:] - c[j,:])", "Here for training\") if train_button: d = Kmeans() c,data = d.test(X,k_value) st.subheader(\"Centroids\") st.write(c)", "fig = px.scatter_3d(d, x='x', y='y', 
z='z',color = 'l') st.plotly_chart(fig) elif self.X.shape[1]==3: print(\"Incomplete\") else:", "is not None: if st.sidebar.checkbox(\"Feature Normalization\"): X = (X - np.mean(X))/np.std(X) st.header(\"X :", "x='x', y='y', z='z',color = 'l') st.plotly_chart(fig) elif self.X.shape[1]==3: print(\"Incomplete\") else: st.error(\"Your data is", "again') if Algorithms == 'Principal Component Analysis': k_value = st.sidebar.number_input('Enter K components value',value", "data.drop(drop_column,axis = 1) st.header('X : Features') st.write(X) if uploaded_file is not None: if", "3) train_button = st.sidebar.checkbox(\"Click Here for training\") if train_button: d = PCA() d.train(X)", "c[p] = np.mean(points, axis=0) return idx,c def test(self,X,K=3): self.X,c,self.K = self.initialize_var(X,K) self.idx,self.c =", "<NAME>,<NAME> \"\"\" import numpy as np import streamlit as st import pandas as", "pd.read_csv(uploaded_file) st.write(data) if uploaded_file is not None: drop_column = st.sidebar.multiselect('X : Features (Selected", "= st.sidebar.checkbox(\"Click Here for training\") if train_button: d = Kmeans() c,data = d.test(X,k_value)", "np.argmin(temp) for p in range(K): points = [X[j] for j in range(len(X)) if", "d.test(X,k_value) st.subheader(\"Centroids\") st.write(c) st.subheader(\"Clustering Data with labels\") st.write(data) d.plot_clusters(data) #except : raise ValueError('graph", "= pd.DataFrame(self.X) idx_ = pd.DataFrame(self.idx) data = pd.concat([X_,idx_],axis =1) return self.c,data def plot_clusters(self,d):", "self.X[:,i:i+1] a['a1'] = np.reshape(a['a1'],(a['a1']).shape[0],) a['a2'] = np.reshape(a['a2'],(a['a2']).shape[0],) fig = go.Figure(data=go.Scatter(x=a['a1'], y=a['a2'], mode='markers', marker=dict(color=self.idx)", "= pd.concat([X_,idx_],axis =1) return self.c,data def plot_clusters(self,d): a={} if self.X.shape[1]==2: for i in", "Explained') st.markdown(d.variance_explained()) st.info('Always Use Feature Normalization when applying PCA') X_pca = 
d.K_components(k_value) st.header('X", "marker=dict(color=self.idx) )) st.plotly_chart(fig) elif self.X.shape[1]==3: d.columns = ['x','y','z','l'] fig = px.scatter_3d(d, x='x', y='y',", "np.array(X) return X def train(self,X): self.X = self.initialization(X) self.covariance_matrix = np.cov(X.T) self.u,s,v =", "st.write(X) if uploaded_file is not None: if st.sidebar.checkbox(\"Feature Normalization\"): X = (X -", "Algorithms = st.sidebar.selectbox( 'Algorithm', ('None','K-means Clustering','Principal Component Analysis') ) if uploaded_file is not", "be dropped)', data.columns.to_list()) X = data.drop(drop_column,axis = 1) st.header('X : Features') st.write(X) if", "pd.DataFrame(self.idx) data = pd.concat([X_,idx_],axis =1) return self.c,data def plot_clusters(self,d): a={} if self.X.shape[1]==2: for", "if uploaded_file is not None: drop_column = st.sidebar.multiselect('X : Features (Selected will be", "st.header(\"X : Features (Normalized)\") st.write(X) class Kmeans: def initialize_var(self,X,K=3): X = np.array(X) m,n", "= ['x','y','z','l'] fig = px.scatter_3d(d, x='x', y='y', z='z',color = 'l') st.plotly_chart(fig) elif self.X.shape[1]==3:", "not None: drop_column = st.sidebar.multiselect('X : Features (Selected will be dropped)', data.columns.to_list()) X", "d = Kmeans() c,data = d.test(X,k_value) st.subheader(\"Centroids\") st.write(c) st.subheader(\"Clustering Data with labels\") st.write(data)", "'Principal Component Analysis': k_value = st.sidebar.number_input('Enter K components value',value = 3) train_button =", "in Higher Dimension state\") class PCA: def initialization(self,X): X = np.array(X) return X", "st.info('Always Use Feature Normalization when applying PCA') X_pca = d.K_components(k_value) st.header('X : Feature", "=1) return self.c,data def plot_clusters(self,d): a={} if self.X.shape[1]==2: for i in range(2): a['a'+str(i+1)]", "np.sum(s) self.variance_exp= [] k = 0 for i in s: k = i+k", "= 0 for i in s: k = i+k variance = k/sum_s 
self.variance_exp.append(variance)", "m = X.shape[0] idx = np.zeros(m) for o in range(10): for i in", "Feature Normalization when applying PCA') X_pca = d.K_components(k_value) st.header('X : Feature (PCA)') st.write(X_pca)", "value exceeds try again') if Algorithms == 'Principal Component Analysis': k_value = st.sidebar.number_input('Enter", "st.sidebar.selectbox( 'Algorithm', ('None','K-means Clustering','Principal Component Analysis') ) if uploaded_file is not None: if", "test(self,X,K=3): self.X,c,self.K = self.initialize_var(X,K) self.idx,self.c = self.assignment_move(self.X,c,self.K) X_ = pd.DataFrame(self.X) idx_ = pd.DataFrame(self.idx)", "return X def train(self,X): self.X = self.initialization(X) self.covariance_matrix = np.cov(X.T) self.u,s,v = np.linalg.svd(self.covariance_matrix)", "K_components(self,n=2): self.X= np.dot(self.X,self.u[:,:n]) return self.X def variance_explained(self): return self.variance_exp if uploaded_file is not", "train(self,X): self.X = self.initialization(X) self.covariance_matrix = np.cov(X.T) self.u,s,v = np.linalg.svd(self.covariance_matrix) sum_s = np.sum(s)", "if st.sidebar.checkbox(\"Feature Normalization\"): X = (X - np.mean(X))/np.std(X) st.header(\"X : Features (Normalized)\") st.write(X)", "if uploaded_file is not None: Algorithms = st.sidebar.selectbox( 'Algorithm', ('None','K-means Clustering','Principal Component Analysis')", "for i in range(2): a['a'+str(i+1)] = self.X[:,i:i+1] a['a1'] = np.reshape(a['a1'],(a['a1']).shape[0],) a['a2'] = np.reshape(a['a2'],(a['a2']).shape[0],)", "Clustering': k_value = st.sidebar.number_input('Enter K value',value = 3) train_button = st.sidebar.checkbox(\"Click Here for", "np.sum((X[i,:] - c[j,:]) ** 2) idx[i] = np.argmin(temp) for p in range(K): points", "= (X - np.mean(X))/np.std(X) st.header(\"X : Features (Normalized)\") st.write(X) class Kmeans: def initialize_var(self,X,K=3):", "= data.drop(drop_column,axis = 1) st.header('X : Features') st.write(X) if uploaded_file is not 
None:", "train_button: d = PCA() d.train(X) st.header('Variance Explained') st.markdown(d.variance_explained()) st.info('Always Use Feature Normalization when", "self.X.shape[1]==3: print(\"Incomplete\") else: st.error(\"Your data is in Higher Dimension state\") class PCA: def", "else: st.error(\"Your data is in Higher Dimension state\") class PCA: def initialization(self,X): X", "k_value = st.sidebar.number_input('Enter K components value',value = 3) train_button = st.sidebar.checkbox(\"Click Here for", "def initialize_var(self,X,K=3): X = np.array(X) m,n = X.shape c = np.random.randn(K,n) return X,c,K", "temp[j] = np.sum((X[i,:] - c[j,:]) ** 2) idx[i] = np.argmin(temp) for p in", "st.header('Variance Explained') st.markdown(d.variance_explained()) st.info('Always Use Feature Normalization when applying PCA') X_pca = d.K_components(k_value)", "range(10): for i in range(m): temp = np.zeros(K) for j in range(K): temp[j]", "a['a2'] = np.reshape(a['a2'],(a['a2']).shape[0],) fig = go.Figure(data=go.Scatter(x=a['a1'], y=a['a2'], mode='markers', marker=dict(color=self.idx) )) st.plotly_chart(fig) elif self.X.shape[1]==3:", "plotly.graph_objects as go import plotly.express as px st.title(\"Synapse Unsupervised Models\") uploaded_file = st.file_uploader(\"Choose", "training\") if train_button: d = PCA() d.train(X) st.header('Variance Explained') st.markdown(d.variance_explained()) st.info('Always Use Feature", "= np.sum((X[i,:] - c[j,:]) ** 2) idx[i] = np.argmin(temp) for p in range(K):", "go import plotly.express as px st.title(\"Synapse Unsupervised Models\") uploaded_file = st.file_uploader(\"Choose a csv", "= 1) st.header('X : Features') st.write(X) if uploaded_file is not None: if st.sidebar.checkbox(\"Feature", "self.assignment_move(self.X,c,self.K) X_ = pd.DataFrame(self.X) idx_ = pd.DataFrame(self.idx) data = pd.concat([X_,idx_],axis =1) return self.c,data", "def plot_clusters(self,d): a={} if self.X.shape[1]==2: for i in range(2): a['a'+str(i+1)] = self.X[:,i:i+1] 
a['a1']", "(Normalized)\") st.write(X) class Kmeans: def initialize_var(self,X,K=3): X = np.array(X) m,n = X.shape c", "st.sidebar.multiselect('X : Features (Selected will be dropped)', data.columns.to_list()) X = data.drop(drop_column,axis = 1)", "self.X = self.initialization(X) self.covariance_matrix = np.cov(X.T) self.u,s,v = np.linalg.svd(self.covariance_matrix) sum_s = np.sum(s) self.variance_exp=", "is not None: if Algorithms == 'K-means Clustering': k_value = st.sidebar.number_input('Enter K value',value", "Analysis') ) if uploaded_file is not None: if Algorithms == 'K-means Clustering': k_value", "labels\") st.write(data) d.plot_clusters(data) #except : raise ValueError('graph not computed with NaN values or", "self.c,data def plot_clusters(self,d): a={} if self.X.shape[1]==2: for i in range(2): a['a'+str(i+1)] = self.X[:,i:i+1]", "not computed with NaN values or no. of K value exceeds try again')", "self.idx,self.c = self.assignment_move(self.X,c,self.K) X_ = pd.DataFrame(self.X) idx_ = pd.DataFrame(self.idx) data = pd.concat([X_,idx_],axis =1)", "numpy as np import streamlit as st import pandas as pd import plotly.graph_objects", "st.write(data) d.plot_clusters(data) #except : raise ValueError('graph not computed with NaN values or no.", "= st.sidebar.number_input('Enter K value',value = 3) train_button = st.sidebar.checkbox(\"Click Here for training\") if", "= PCA() d.train(X) st.header('Variance Explained') st.markdown(d.variance_explained()) st.info('Always Use Feature Normalization when applying PCA')", "X = np.array(X) return X def train(self,X): self.X = self.initialization(X) self.covariance_matrix = np.cov(X.T)", "i in range(2): a['a'+str(i+1)] = self.X[:,i:i+1] a['a1'] = np.reshape(a['a1'],(a['a1']).shape[0],) a['a2'] = np.reshape(a['a2'],(a['a2']).shape[0],) fig", "in range(K): temp[j] = np.sum((X[i,:] - c[j,:]) ** 2) idx[i] = np.argmin(temp) for", "elif self.X.shape[1]==3: print(\"Incomplete\") else: st.error(\"Your data is in Higher Dimension 
state\") class PCA:", "i in s: k = i+k variance = k/sum_s self.variance_exp.append(variance) def K_components(self,n=2): self.X=", "is in Higher Dimension state\") class PCA: def initialization(self,X): X = np.array(X) return", "Analysis': k_value = st.sidebar.number_input('Enter K components value',value = 3) train_button = st.sidebar.checkbox(\"Click Here", "st.sidebar.number_input('Enter K components value',value = 3) train_button = st.sidebar.checkbox(\"Click Here for training\") if", "('None','K-means Clustering','Principal Component Analysis') ) if uploaded_file is not None: if Algorithms ==", "if train_button: d = Kmeans() c,data = d.test(X,k_value) st.subheader(\"Centroids\") st.write(c) st.subheader(\"Clustering Data with", "'l') st.plotly_chart(fig) elif self.X.shape[1]==3: print(\"Incomplete\") else: st.error(\"Your data is in Higher Dimension state\")", "range(2): a['a'+str(i+1)] = self.X[:,i:i+1] a['a1'] = np.reshape(a['a1'],(a['a1']).shape[0],) a['a2'] = np.reshape(a['a2'],(a['a2']).shape[0],) fig = go.Figure(data=go.Scatter(x=a['a1'],", "components value',value = 3) train_button = st.sidebar.checkbox(\"Click Here for training\") if train_button: d", "import pandas as pd import plotly.graph_objects as go import plotly.express as px st.title(\"Synapse", "def train(self,X): self.X = self.initialization(X) self.covariance_matrix = np.cov(X.T) self.u,s,v = np.linalg.svd(self.covariance_matrix) sum_s =", "computed with NaN values or no. 
of K value exceeds try again') if", "= self.X[:,i:i+1] a['a1'] = np.reshape(a['a1'],(a['a1']).shape[0],) a['a2'] = np.reshape(a['a2'],(a['a2']).shape[0],) fig = go.Figure(data=go.Scatter(x=a['a1'], y=a['a2'], mode='markers',", "3) train_button = st.sidebar.checkbox(\"Click Here for training\") if train_button: d = Kmeans() c,data", "initialization(self,X): X = np.array(X) return X def train(self,X): self.X = self.initialization(X) self.covariance_matrix =", "m,n = X.shape c = np.random.randn(K,n) return X,c,K def assignment_move(self,X,c,K): m = X.shape[0]", "c = np.random.randn(K,n) return X,c,K def assignment_move(self,X,c,K): m = X.shape[0] idx = np.zeros(m)", "'Algorithm', ('None','K-means Clustering','Principal Component Analysis') ) if uploaded_file is not None: if Algorithms", ": raise ValueError('graph not computed with NaN values or no. of K value", "Kmeans: def initialize_var(self,X,K=3): X = np.array(X) m,n = X.shape c = np.random.randn(K,n) return", "p] c[p] = np.mean(points, axis=0) return idx,c def test(self,X,K=3): self.X,c,self.K = self.initialize_var(X,K) self.idx,self.c", "= st.file_uploader(\"Choose a csv file\", type=\"csv\") if uploaded_file is not None: data =", "uploaded_file is not None: Algorithms = st.sidebar.selectbox( 'Algorithm', ('None','K-means Clustering','Principal Component Analysis') )", "idx[i] = np.argmin(temp) for p in range(K): points = [X[j] for j in", "@author: <NAME>,<NAME> \"\"\" import numpy as np import streamlit as st import pandas", "np.reshape(a['a2'],(a['a2']).shape[0],) fig = go.Figure(data=go.Scatter(x=a['a1'], y=a['a2'], mode='markers', marker=dict(color=self.idx) )) st.plotly_chart(fig) elif self.X.shape[1]==3: d.columns =", "for o in range(10): for i in range(m): temp = np.zeros(K) for j", "uploaded_file = st.file_uploader(\"Choose a csv file\", type=\"csv\") if uploaded_file is not None: data", "fig = go.Figure(data=go.Scatter(x=a['a1'], y=a['a2'], mode='markers', marker=dict(color=self.idx) )) 
st.plotly_chart(fig) elif self.X.shape[1]==3: d.columns = ['x','y','z','l']", "= px.scatter_3d(d, x='x', y='y', z='z',color = 'l') st.plotly_chart(fig) elif self.X.shape[1]==3: print(\"Incomplete\") else: st.error(\"Your", "= i+k variance = k/sum_s self.variance_exp.append(variance) def K_components(self,n=2): self.X= np.dot(self.X,self.u[:,:n]) return self.X def", "Kmeans() c,data = d.test(X,k_value) st.subheader(\"Centroids\") st.write(c) st.subheader(\"Clustering Data with labels\") st.write(data) d.plot_clusters(data) #except", "uploaded_file is not None: if st.sidebar.checkbox(\"Feature Normalization\"): X = (X - np.mean(X))/np.std(X) st.header(\"X", "= X.shape[0] idx = np.zeros(m) for o in range(10): for i in range(m):", "np.mean(points, axis=0) return idx,c def test(self,X,K=3): self.X,c,self.K = self.initialize_var(X,K) self.idx,self.c = self.assignment_move(self.X,c,self.K) X_", "value',value = 3) train_button = st.sidebar.checkbox(\"Click Here for training\") if train_button: d =", "= self.assignment_move(self.X,c,self.K) X_ = pd.DataFrame(self.X) idx_ = pd.DataFrame(self.idx) data = pd.concat([X_,idx_],axis =1) return", "mode='markers', marker=dict(color=self.idx) )) st.plotly_chart(fig) elif self.X.shape[1]==3: d.columns = ['x','y','z','l'] fig = px.scatter_3d(d, x='x',", "is not None: Algorithms = st.sidebar.selectbox( 'Algorithm', ('None','K-means Clustering','Principal Component Analysis') ) if", "st.title(\"Synapse Unsupervised Models\") uploaded_file = st.file_uploader(\"Choose a csv file\", type=\"csv\") if uploaded_file is", "st.write(c) st.subheader(\"Clustering Data with labels\") st.write(data) d.plot_clusters(data) #except : raise ValueError('graph not computed", "initialize_var(self,X,K=3): X = np.array(X) m,n = X.shape c = np.random.randn(K,n) return X,c,K def", "for j in range(K): temp[j] = np.sum((X[i,:] - c[j,:]) ** 2) idx[i] =", "try again') if Algorithms == 'Principal Component Analysis': k_value = st.sidebar.number_input('Enter K 
components", "st.plotly_chart(fig) elif self.X.shape[1]==3: d.columns = ['x','y','z','l'] fig = px.scatter_3d(d, x='x', y='y', z='z',color =", "if uploaded_file is not None: if Algorithms == 'K-means Clustering': k_value = st.sidebar.number_input('Enter", "= st.sidebar.checkbox(\"Click Here for training\") if train_button: d = PCA() d.train(X) st.header('Variance Explained')", "st.sidebar.checkbox(\"Click Here for training\") if train_button: d = PCA() d.train(X) st.header('Variance Explained') st.markdown(d.variance_explained())", "as go import plotly.express as px st.title(\"Synapse Unsupervised Models\") uploaded_file = st.file_uploader(\"Choose a", "Normalization\"): X = (X - np.mean(X))/np.std(X) st.header(\"X : Features (Normalized)\") st.write(X) class Kmeans:", "= pd.read_csv(uploaded_file) st.write(data) if uploaded_file is not None: drop_column = st.sidebar.multiselect('X : Features", "return X,c,K def assignment_move(self,X,c,K): m = X.shape[0] idx = np.zeros(m) for o in", "= self.initialize_var(X,K) self.idx,self.c = self.assignment_move(self.X,c,self.K) X_ = pd.DataFrame(self.X) idx_ = pd.DataFrame(self.idx) data =", "'K-means Clustering': k_value = st.sidebar.number_input('Enter K value',value = 3) train_button = st.sidebar.checkbox(\"Click Here", "no. 
of K value exceeds try again') if Algorithms == 'Principal Component Analysis':", "= 'l') st.plotly_chart(fig) elif self.X.shape[1]==3: print(\"Incomplete\") else: st.error(\"Your data is in Higher Dimension", "in range(10): for i in range(m): temp = np.zeros(K) for j in range(K):", "= st.sidebar.number_input('Enter K components value',value = 3) train_button = st.sidebar.checkbox(\"Click Here for training\")", "= np.mean(points, axis=0) return idx,c def test(self,X,K=3): self.X,c,self.K = self.initialize_var(X,K) self.idx,self.c = self.assignment_move(self.X,c,self.K)", "= np.reshape(a['a2'],(a['a2']).shape[0],) fig = go.Figure(data=go.Scatter(x=a['a1'], y=a['a2'], mode='markers', marker=dict(color=self.idx) )) st.plotly_chart(fig) elif self.X.shape[1]==3: d.columns", "return self.X def variance_explained(self): return self.variance_exp if uploaded_file is not None: Algorithms =", "import plotly.express as px st.title(\"Synapse Unsupervised Models\") uploaded_file = st.file_uploader(\"Choose a csv file\",", "Features') st.write(X) if uploaded_file is not None: if st.sidebar.checkbox(\"Feature Normalization\"): X = (X", "= st.sidebar.multiselect('X : Features (Selected will be dropped)', data.columns.to_list()) X = data.drop(drop_column,axis =", "\"\"\" import numpy as np import streamlit as st import pandas as pd", "= [X[j] for j in range(len(X)) if idx[j] == p] c[p] = np.mean(points,", "self.covariance_matrix = np.cov(X.T) self.u,s,v = np.linalg.svd(self.covariance_matrix) sum_s = np.sum(s) self.variance_exp= [] k =", "#except : raise ValueError('graph not computed with NaN values or no. 
of K", "in range(m): temp = np.zeros(K) for j in range(K): temp[j] = np.sum((X[i,:] -", ")) st.plotly_chart(fig) elif self.X.shape[1]==3: d.columns = ['x','y','z','l'] fig = px.scatter_3d(d, x='x', y='y', z='z',color", "(X - np.mean(X))/np.std(X) st.header(\"X : Features (Normalized)\") st.write(X) class Kmeans: def initialize_var(self,X,K=3): X", "['x','y','z','l'] fig = px.scatter_3d(d, x='x', y='y', z='z',color = 'l') st.plotly_chart(fig) elif self.X.shape[1]==3: print(\"Incomplete\")", "with labels\") st.write(data) d.plot_clusters(data) #except : raise ValueError('graph not computed with NaN values", "Use Feature Normalization when applying PCA') X_pca = d.K_components(k_value) st.header('X : Feature (PCA)')", "dropped)', data.columns.to_list()) X = data.drop(drop_column,axis = 1) st.header('X : Features') st.write(X) if uploaded_file", "self.variance_exp if uploaded_file is not None: Algorithms = st.sidebar.selectbox( 'Algorithm', ('None','K-means Clustering','Principal Component", "[] k = 0 for i in s: k = i+k variance =", "a csv file\", type=\"csv\") if uploaded_file is not None: data = pd.read_csv(uploaded_file) st.write(data)", "st.sidebar.number_input('Enter K value',value = 3) train_button = st.sidebar.checkbox(\"Click Here for training\") if train_button:", "- np.mean(X))/np.std(X) st.header(\"X : Features (Normalized)\") st.write(X) class Kmeans: def initialize_var(self,X,K=3): X =", "Data with labels\") st.write(data) d.plot_clusters(data) #except : raise ValueError('graph not computed with NaN", "self.X def variance_explained(self): return self.variance_exp if uploaded_file is not None: Algorithms = st.sidebar.selectbox(", "self.initialization(X) self.covariance_matrix = np.cov(X.T) self.u,s,v = np.linalg.svd(self.covariance_matrix) sum_s = np.sum(s) self.variance_exp= [] k", "= go.Figure(data=go.Scatter(x=a['a1'], y=a['a2'], mode='markers', marker=dict(color=self.idx) )) st.plotly_chart(fig) elif self.X.shape[1]==3: d.columns = ['x','y','z','l'] 
fig", "PCA() d.train(X) st.header('Variance Explained') st.markdown(d.variance_explained()) st.info('Always Use Feature Normalization when applying PCA') X_pca", "K components value',value = 3) train_button = st.sidebar.checkbox(\"Click Here for training\") if train_button:", "y='y', z='z',color = 'l') st.plotly_chart(fig) elif self.X.shape[1]==3: print(\"Incomplete\") else: st.error(\"Your data is in", "idx,c def test(self,X,K=3): self.X,c,self.K = self.initialize_var(X,K) self.idx,self.c = self.assignment_move(self.X,c,self.K) X_ = pd.DataFrame(self.X) idx_", "px st.title(\"Synapse Unsupervised Models\") uploaded_file = st.file_uploader(\"Choose a csv file\", type=\"csv\") if uploaded_file", "= np.sum(s) self.variance_exp= [] k = 0 for i in s: k =", "Algorithms == 'K-means Clustering': k_value = st.sidebar.number_input('Enter K value',value = 3) train_button =", "axis=0) return idx,c def test(self,X,K=3): self.X,c,self.K = self.initialize_var(X,K) self.idx,self.c = self.assignment_move(self.X,c,self.K) X_ =", "with NaN values or no. 
of K value exceeds try again') if Algorithms", "st.sidebar.checkbox(\"Feature Normalization\"): X = (X - np.mean(X))/np.std(X) st.header(\"X : Features (Normalized)\") st.write(X) class", "state\") class PCA: def initialization(self,X): X = np.array(X) return X def train(self,X): self.X", "range(K): temp[j] = np.sum((X[i,:] - c[j,:]) ** 2) idx[i] = np.argmin(temp) for p", "not None: data = pd.read_csv(uploaded_file) st.write(data) if uploaded_file is not None: drop_column =", ") if uploaded_file is not None: if Algorithms == 'K-means Clustering': k_value =", "for j in range(len(X)) if idx[j] == p] c[p] = np.mean(points, axis=0) return", "import plotly.graph_objects as go import plotly.express as px st.title(\"Synapse Unsupervised Models\") uploaded_file =", "temp = np.zeros(K) for j in range(K): temp[j] = np.sum((X[i,:] - c[j,:]) **", "plot_clusters(self,d): a={} if self.X.shape[1]==2: for i in range(2): a['a'+str(i+1)] = self.X[:,i:i+1] a['a1'] =", "as np import streamlit as st import pandas as pd import plotly.graph_objects as", "K value exceeds try again') if Algorithms == 'Principal Component Analysis': k_value =", "None: if st.sidebar.checkbox(\"Feature Normalization\"): X = (X - np.mean(X))/np.std(X) st.header(\"X : Features (Normalized)\")", "= np.reshape(a['a1'],(a['a1']).shape[0],) a['a2'] = np.reshape(a['a2'],(a['a2']).shape[0],) fig = go.Figure(data=go.Scatter(x=a['a1'], y=a['a2'], mode='markers', marker=dict(color=self.idx) )) st.plotly_chart(fig)", "ValueError('graph not computed with NaN values or no. 
of K value exceeds try", "y=a['a2'], mode='markers', marker=dict(color=self.idx) )) st.plotly_chart(fig) elif self.X.shape[1]==3: d.columns = ['x','y','z','l'] fig = px.scatter_3d(d,", "self.variance_exp.append(variance) def K_components(self,n=2): self.X= np.dot(self.X,self.u[:,:n]) return self.X def variance_explained(self): return self.variance_exp if uploaded_file", "== 'Principal Component Analysis': k_value = st.sidebar.number_input('Enter K components value',value = 3) train_button", "if idx[j] == p] c[p] = np.mean(points, axis=0) return idx,c def test(self,X,K=3): self.X,c,self.K", "idx_ = pd.DataFrame(self.idx) data = pd.concat([X_,idx_],axis =1) return self.c,data def plot_clusters(self,d): a={} if", "\"\"\" @author: <NAME>,<NAME> \"\"\" import numpy as np import streamlit as st import", "variance_explained(self): return self.variance_exp if uploaded_file is not None: Algorithms = st.sidebar.selectbox( 'Algorithm', ('None','K-means", "k_value = st.sidebar.number_input('Enter K value',value = 3) train_button = st.sidebar.checkbox(\"Click Here for training\")", "st.error(\"Your data is in Higher Dimension state\") class PCA: def initialization(self,X): X =", "= d.test(X,k_value) st.subheader(\"Centroids\") st.write(c) st.subheader(\"Clustering Data with labels\") st.write(data) d.plot_clusters(data) #except : raise", "X = (X - np.mean(X))/np.std(X) st.header(\"X : Features (Normalized)\") st.write(X) class Kmeans: def", "Dimension state\") class PCA: def initialization(self,X): X = np.array(X) return X def train(self,X):", "Unsupervised Models\") uploaded_file = st.file_uploader(\"Choose a csv file\", type=\"csv\") if uploaded_file is not", "j in range(K): temp[j] = np.sum((X[i,:] - c[j,:]) ** 2) idx[i] = np.argmin(temp)", "data = pd.concat([X_,idx_],axis =1) return self.c,data def plot_clusters(self,d): a={} if self.X.shape[1]==2: for i", "= Kmeans() c,data = d.test(X,k_value) st.subheader(\"Centroids\") st.write(c) st.subheader(\"Clustering Data with 
labels\") st.write(data) d.plot_clusters(data)", "np import streamlit as st import pandas as pd import plotly.graph_objects as go", "self.X.shape[1]==2: for i in range(2): a['a'+str(i+1)] = self.X[:,i:i+1] a['a1'] = np.reshape(a['a1'],(a['a1']).shape[0],) a['a2'] =", "data = pd.read_csv(uploaded_file) st.write(data) if uploaded_file is not None: drop_column = st.sidebar.multiselect('X :", "idx[j] == p] c[p] = np.mean(points, axis=0) return idx,c def test(self,X,K=3): self.X,c,self.K =", "a={} if self.X.shape[1]==2: for i in range(2): a['a'+str(i+1)] = self.X[:,i:i+1] a['a1'] = np.reshape(a['a1'],(a['a1']).shape[0],)", "not None: if st.sidebar.checkbox(\"Feature Normalization\"): X = (X - np.mean(X))/np.std(X) st.header(\"X : Features", "def K_components(self,n=2): self.X= np.dot(self.X,self.u[:,:n]) return self.X def variance_explained(self): return self.variance_exp if uploaded_file is", "not None: Algorithms = st.sidebar.selectbox( 'Algorithm', ('None','K-means Clustering','Principal Component Analysis') ) if uploaded_file", "import streamlit as st import pandas as pd import plotly.graph_objects as go import", "Here for training\") if train_button: d = PCA() d.train(X) st.header('Variance Explained') st.markdown(d.variance_explained()) st.info('Always", "train_button: d = Kmeans() c,data = d.test(X,k_value) st.subheader(\"Centroids\") st.write(c) st.subheader(\"Clustering Data with labels\")", "== p] c[p] = np.mean(points, axis=0) return idx,c def test(self,X,K=3): self.X,c,self.K = self.initialize_var(X,K)", "0 for i in s: k = i+k variance = k/sum_s self.variance_exp.append(variance) def", "d.columns = ['x','y','z','l'] fig = px.scatter_3d(d, x='x', y='y', z='z',color = 'l') st.plotly_chart(fig) elif", "self.X.shape[1]==3: d.columns = ['x','y','z','l'] fig = px.scatter_3d(d, x='x', y='y', z='z',color = 'l') st.plotly_chart(fig)", "self.variance_exp= [] k = 0 for i in s: k = i+k variance", "NaN values or no. 
of K value exceeds try again') if Algorithms ==", "1) st.header('X : Features') st.write(X) if uploaded_file is not None: if st.sidebar.checkbox(\"Feature Normalization\"):", "self.u,s,v = np.linalg.svd(self.covariance_matrix) sum_s = np.sum(s) self.variance_exp= [] k = 0 for i", "None: drop_column = st.sidebar.multiselect('X : Features (Selected will be dropped)', data.columns.to_list()) X =", "def initialization(self,X): X = np.array(X) return X def train(self,X): self.X = self.initialization(X) self.covariance_matrix", "k = 0 for i in s: k = i+k variance = k/sum_s", "def test(self,X,K=3): self.X,c,self.K = self.initialize_var(X,K) self.idx,self.c = self.assignment_move(self.X,c,self.K) X_ = pd.DataFrame(self.X) idx_ =", "c[j,:]) ** 2) idx[i] = np.argmin(temp) for p in range(K): points = [X[j]", "i in range(m): temp = np.zeros(K) for j in range(K): temp[j] = np.sum((X[i,:]", "X = np.array(X) m,n = X.shape c = np.random.randn(K,n) return X,c,K def assignment_move(self,X,c,K):", "= self.initialization(X) self.covariance_matrix = np.cov(X.T) self.u,s,v = np.linalg.svd(self.covariance_matrix) sum_s = np.sum(s) self.variance_exp= []", "st.subheader(\"Clustering Data with labels\") st.write(data) d.plot_clusters(data) #except : raise ValueError('graph not computed with", "- c[j,:]) ** 2) idx[i] = np.argmin(temp) for p in range(K): points =", "if train_button: d = PCA() d.train(X) st.header('Variance Explained') st.markdown(d.variance_explained()) st.info('Always Use Feature Normalization", "in range(2): a['a'+str(i+1)] = self.X[:,i:i+1] a['a1'] = np.reshape(a['a1'],(a['a1']).shape[0],) a['a2'] = np.reshape(a['a2'],(a['a2']).shape[0],) fig =", "np.dot(self.X,self.u[:,:n]) return self.X def variance_explained(self): return self.variance_exp if uploaded_file is not None: Algorithms", "d.train(X) st.header('Variance Explained') st.markdown(d.variance_explained()) st.info('Always Use Feature Normalization when applying PCA') X_pca =", "Algorithms == 'Principal Component 
Analysis': k_value = st.sidebar.number_input('Enter K components value',value = 3)", "pandas as pd import plotly.graph_objects as go import plotly.express as px st.title(\"Synapse Unsupervised", "pd.DataFrame(self.X) idx_ = pd.DataFrame(self.idx) data = pd.concat([X_,idx_],axis =1) return self.c,data def plot_clusters(self,d): a={}", "for i in s: k = i+k variance = k/sum_s self.variance_exp.append(variance) def K_components(self,n=2):", "is not None: drop_column = st.sidebar.multiselect('X : Features (Selected will be dropped)', data.columns.to_list())", "np.random.randn(K,n) return X,c,K def assignment_move(self,X,c,K): m = X.shape[0] idx = np.zeros(m) for o", "p in range(K): points = [X[j] for j in range(len(X)) if idx[j] ==", "np.zeros(m) for o in range(10): for i in range(m): temp = np.zeros(K) for", "uploaded_file is not None: if Algorithms == 'K-means Clustering': k_value = st.sidebar.number_input('Enter K", "None: data = pd.read_csv(uploaded_file) st.write(data) if uploaded_file is not None: drop_column = st.sidebar.multiselect('X", "Higher Dimension state\") class PCA: def initialization(self,X): X = np.array(X) return X def", "print(\"Incomplete\") else: st.error(\"Your data is in Higher Dimension state\") class PCA: def initialization(self,X):", "if self.X.shape[1]==2: for i in range(2): a['a'+str(i+1)] = self.X[:,i:i+1] a['a1'] = np.reshape(a['a1'],(a['a1']).shape[0],) a['a2']", "st.write(data) if uploaded_file is not None: drop_column = st.sidebar.multiselect('X : Features (Selected will", "raise ValueError('graph not computed with NaN values or no. 
of K value exceeds", "for training\") if train_button: d = PCA() d.train(X) st.header('Variance Explained') st.markdown(d.variance_explained()) st.info('Always Use", "Component Analysis': k_value = st.sidebar.number_input('Enter K components value',value = 3) train_button = st.sidebar.checkbox(\"Click", "= np.zeros(m) for o in range(10): for i in range(m): temp = np.zeros(K)", "points = [X[j] for j in range(len(X)) if idx[j] == p] c[p] =", "csv file\", type=\"csv\") if uploaded_file is not None: data = pd.read_csv(uploaded_file) st.write(data) if", "for p in range(K): points = [X[j] for j in range(len(X)) if idx[j]", "st.subheader(\"Centroids\") st.write(c) st.subheader(\"Clustering Data with labels\") st.write(data) d.plot_clusters(data) #except : raise ValueError('graph not", "d.plot_clusters(data) #except : raise ValueError('graph not computed with NaN values or no. of", "X_ = pd.DataFrame(self.X) idx_ = pd.DataFrame(self.idx) data = pd.concat([X_,idx_],axis =1) return self.c,data def", "i+k variance = k/sum_s self.variance_exp.append(variance) def K_components(self,n=2): self.X= np.dot(self.X,self.u[:,:n]) return self.X def variance_explained(self):", ": Features') st.write(X) if uploaded_file is not None: if st.sidebar.checkbox(\"Feature Normalization\"): X =", "as st import pandas as pd import plotly.graph_objects as go import plotly.express as", "def variance_explained(self): return self.variance_exp if uploaded_file is not None: Algorithms = st.sidebar.selectbox( 'Algorithm',", "j in range(len(X)) if idx[j] == p] c[p] = np.mean(points, axis=0) return idx,c", "Component Analysis') ) if uploaded_file is not None: if Algorithms == 'K-means Clustering':", "st import pandas as pd import plotly.graph_objects as go import plotly.express as px", "== 'K-means Clustering': k_value = st.sidebar.number_input('Enter K value',value = 3) train_button = st.sidebar.checkbox(\"Click", "d = PCA() d.train(X) st.header('Variance Explained') st.markdown(d.variance_explained()) 
st.info('Always Use Feature Normalization when applying", "import numpy as np import streamlit as st import pandas as pd import", "(Selected will be dropped)', data.columns.to_list()) X = data.drop(drop_column,axis = 1) st.header('X : Features')", "Models\") uploaded_file = st.file_uploader(\"Choose a csv file\", type=\"csv\") if uploaded_file is not None:", "if uploaded_file is not None: if st.sidebar.checkbox(\"Feature Normalization\"): X = (X - np.mean(X))/np.std(X)", "return idx,c def test(self,X,K=3): self.X,c,self.K = self.initialize_var(X,K) self.idx,self.c = self.assignment_move(self.X,c,self.K) X_ = pd.DataFrame(self.X)", "return self.c,data def plot_clusters(self,d): a={} if self.X.shape[1]==2: for i in range(2): a['a'+str(i+1)] =", "Clustering','Principal Component Analysis') ) if uploaded_file is not None: if Algorithms == 'K-means", "z='z',color = 'l') st.plotly_chart(fig) elif self.X.shape[1]==3: print(\"Incomplete\") else: st.error(\"Your data is in Higher", "K value',value = 3) train_button = st.sidebar.checkbox(\"Click Here for training\") if train_button: d", "self.X,c,self.K = self.initialize_var(X,K) self.idx,self.c = self.assignment_move(self.X,c,self.K) X_ = pd.DataFrame(self.X) idx_ = pd.DataFrame(self.idx) data", "train_button = st.sidebar.checkbox(\"Click Here for training\") if train_button: d = Kmeans() c,data =", "px.scatter_3d(d, x='x', y='y', z='z',color = 'l') st.plotly_chart(fig) elif self.X.shape[1]==3: print(\"Incomplete\") else: st.error(\"Your data", "streamlit as st import pandas as pd import plotly.graph_objects as go import plotly.express", "range(K): points = [X[j] for j in range(len(X)) if idx[j] == p] c[p]", "training\") if train_button: d = Kmeans() c,data = d.test(X,k_value) st.subheader(\"Centroids\") st.write(c) st.subheader(\"Clustering Data", "X.shape[0] idx = np.zeros(m) for o in range(10): for i in range(m): temp", "st.header('X : Features') st.write(X) if uploaded_file is not None: if st.sidebar.checkbox(\"Feature 
Normalization\"): X", "= 3) train_button = st.sidebar.checkbox(\"Click Here for training\") if train_button: d = PCA()", "self.X= np.dot(self.X,self.u[:,:n]) return self.X def variance_explained(self): return self.variance_exp if uploaded_file is not None:", "type=\"csv\") if uploaded_file is not None: data = pd.read_csv(uploaded_file) st.write(data) if uploaded_file is", "None: if Algorithms == 'K-means Clustering': k_value = st.sidebar.number_input('Enter K value',value = 3)", "self.initialize_var(X,K) self.idx,self.c = self.assignment_move(self.X,c,self.K) X_ = pd.DataFrame(self.X) idx_ = pd.DataFrame(self.idx) data = pd.concat([X_,idx_],axis", "assignment_move(self,X,c,K): m = X.shape[0] idx = np.zeros(m) for o in range(10): for i", "pd import plotly.graph_objects as go import plotly.express as px st.title(\"Synapse Unsupervised Models\") uploaded_file", "np.linalg.svd(self.covariance_matrix) sum_s = np.sum(s) self.variance_exp= [] k = 0 for i in s:", "k = i+k variance = k/sum_s self.variance_exp.append(variance) def K_components(self,n=2): self.X= np.dot(self.X,self.u[:,:n]) return self.X", "drop_column = st.sidebar.multiselect('X : Features (Selected will be dropped)', data.columns.to_list()) X = data.drop(drop_column,axis", "data is in Higher Dimension state\") class PCA: def initialization(self,X): X = np.array(X)", "if Algorithms == 'Principal Component Analysis': k_value = st.sidebar.number_input('Enter K components value',value =", "train_button = st.sidebar.checkbox(\"Click Here for training\") if train_button: d = PCA() d.train(X) st.header('Variance", "values or no. of K value exceeds try again') if Algorithms == 'Principal", "class PCA: def initialization(self,X): X = np.array(X) return X def train(self,X): self.X =", "or no. 
of K value exceeds try again') if Algorithms == 'Principal Component", "if Algorithms == 'K-means Clustering': k_value = st.sidebar.number_input('Enter K value',value = 3) train_button", "st.file_uploader(\"Choose a csv file\", type=\"csv\") if uploaded_file is not None: data = pd.read_csv(uploaded_file)", "k/sum_s self.variance_exp.append(variance) def K_components(self,n=2): self.X= np.dot(self.X,self.u[:,:n]) return self.X def variance_explained(self): return self.variance_exp if", "= pd.DataFrame(self.idx) data = pd.concat([X_,idx_],axis =1) return self.c,data def plot_clusters(self,d): a={} if self.X.shape[1]==2:", "st.write(X) class Kmeans: def initialize_var(self,X,K=3): X = np.array(X) m,n = X.shape c =", "go.Figure(data=go.Scatter(x=a['a1'], y=a['a2'], mode='markers', marker=dict(color=self.idx) )) st.plotly_chart(fig) elif self.X.shape[1]==3: d.columns = ['x','y','z','l'] fig =", "X.shape c = np.random.randn(K,n) return X,c,K def assignment_move(self,X,c,K): m = X.shape[0] idx =", "= 3) train_button = st.sidebar.checkbox(\"Click Here for training\") if train_button: d = Kmeans()", "X,c,K def assignment_move(self,X,c,K): m = X.shape[0] idx = np.zeros(m) for o in range(10):", "Features (Normalized)\") st.write(X) class Kmeans: def initialize_var(self,X,K=3): X = np.array(X) m,n = X.shape", "2) idx[i] = np.argmin(temp) for p in range(K): points = [X[j] for j", "in range(len(X)) if idx[j] == p] c[p] = np.mean(points, axis=0) return idx,c def", "= np.zeros(K) for j in range(K): temp[j] = np.sum((X[i,:] - c[j,:]) ** 2)", "np.reshape(a['a1'],(a['a1']).shape[0],) a['a2'] = np.reshape(a['a2'],(a['a2']).shape[0],) fig = go.Figure(data=go.Scatter(x=a['a1'], y=a['a2'], mode='markers', marker=dict(color=self.idx) )) st.plotly_chart(fig) elif", "= np.array(X) return X def train(self,X): self.X = self.initialization(X) self.covariance_matrix = np.cov(X.T) self.u,s,v", "will be dropped)', data.columns.to_list()) X = data.drop(drop_column,axis = 1) st.header('X : 
Features') st.write(X)", "** 2) idx[i] = np.argmin(temp) for p in range(K): points = [X[j] for", "= X.shape c = np.random.randn(K,n) return X,c,K def assignment_move(self,X,c,K): m = X.shape[0] idx", "range(len(X)) if idx[j] == p] c[p] = np.mean(points, axis=0) return idx,c def test(self,X,K=3):", "is not None: data = pd.read_csv(uploaded_file) st.write(data) if uploaded_file is not None: drop_column", "idx = np.zeros(m) for o in range(10): for i in range(m): temp =", "data.columns.to_list()) X = data.drop(drop_column,axis = 1) st.header('X : Features') st.write(X) if uploaded_file is" ]
[]
[ "= ( [MinimalResource(**move) for move in use_after] if use_after is not None else", "healing self.crit_rate = crit_rate self.ailment_chance = ailment_chance self.flinch_chance = flinch_chance self.stat_chance = stat_chance", "min_turns: int max_turns: int drain: int healing: int crit_rate: int ailment_chance: int flinch_chance:", "List[Dict[str, Any]], super_contest_effect: Dict[str, Any], target: Dict[str, Any], type: Dict[str, Any], ) ->", "aiopoke.objects.utility import ( MachineVersionDetail, Name, NamedResource, VerboseEffect, ) from aiopoke.utils.minimal_resources import MinimalResource, Url", "int, max_hits: int, min_turns: int, max_turns: int, drain: int, healing: int, crit_rate: int,", "Url[\"ContestEffect\"] contest_type: MinimalResource[\"ContestType\"] damage_class: MinimalResource[\"MoveDamageClass\"] effect_chance: Optional[int] effect_changes: List[\"AbilityEffectChange\"] effect_entries: List[\"VerboseEffect\"] flavor_text_entries: List[\"MoveFlavorText\"]", "Resource if TYPE_CHECKING: from aiopoke.objects.resources import ( ContestEffect, ContestType, Generation, NaturalGiftType, Pokemon, Stat,", "= MinimalResource(**version_group) class MoveMetaData(Resource): ailment: MinimalResource[\"MoveAilment\"] category: MinimalResource[\"MoveCategory\"] min_hits: int max_hits: int min_turns:", "-> None: super().__init__(id=id, name=name) self.accuracy = accuracy self.contest_combos = ContestComboSets(**contest_combos) self.contest_effect = Url(**contest_effect)", "MinimalResource(**stat) class PastMoveStatValues(Resource): accuracy: int effect_chance: int power: int pp: int effect_entries: List[\"VerboseEffect\"]", "use_before: Optional[List[MinimalResource[\"Move\"]]] use_after: Optional[List[MinimalResource[\"Move\"]]] def __init__( self, *, use_before: Optional[List[Dict[str, Any]]], use_after: Optional[List[Dict[str,", "max_turns: int drain: int healing: int crit_rate: int ailment_chance: int flinch_chance: int stat_chance:", "effect_chance 
self.effect_entries = [ VerboseEffect(**effect_entry) for effect_entry in effect_entries ] self.effect_changes = [", "MoveMetaData(**meta) self.names = [Name(**name) for name in names] self.past_values = [ PastMoveStatValues(**past_value) for", "is not None else None self.super = ContestComboDetail(**super) if super is not None", "in names] self.past_values = [ PastMoveStatValues(**past_value) for past_value in past_values ] self.power =", "= pp self.priority = priority self.stat_changes = [ MoveStatChange(**stat_change) for stat_change in stat_changes", "past_values ] self.power = power self.pp = pp self.priority = priority self.stat_changes =", "] self.generation = MinimalResource(**generation) self.learned_by_pokemon = [ MinimalResource(**pokemon) for pokemon in learned_by_pokemon ]", "MinimalResource[\"MoveDamageClass\"] effect_chance: Optional[int] effect_changes: List[\"AbilityEffectChange\"] effect_entries: List[\"VerboseEffect\"] flavor_text_entries: List[\"MoveFlavorText\"] generation: MinimalResource[\"Generation\"] learned_by_pokemon: List[MinimalResource[\"Pokemon\"]]", "Any]], effect_changes: List[Dict[str, Any]], flavor_text_entries: List[Dict[str, Any]], generation: Dict[str, Any], learned_by_pokemon: List[Dict[str, Any]],", "names: List[Dict[str, Any]], past_values: List[Dict[str, Any]], power: int, pp: int, priority: int, stat_changes:", "Dict[str, Any], ) -> None: super().__init__(id=id, name=name) self.accuracy = accuracy self.contest_combos = ContestComboSets(**contest_combos)", "Dict[str, Any], contest_type: Dict[str, Any], damage_class: Dict[str, Any], effect_chance: Optional[int], effect_entries: List[Dict[str, Any]],", ") from aiopoke.objects.utility import Language class Move(NamedResource): accuracy: int contest_combos: \"ContestComboSets\" contest_effect: Url[\"ContestEffect\"]", "effect_entries: List[Dict[str, Any]], effect_changes: List[Dict[str, Any]], flavor_text_entries: List[Dict[str, Any]], generation: Dict[str, Any], 
learned_by_pokemon:", "if normal is not None else None self.super = ContestComboDetail(**super) if super is", "None: self.change = change self.stat = MinimalResource(**stat) class PastMoveStatValues(Resource): accuracy: int effect_chance: int", "def __init__( self, *, accuracy: int, effect_chance: int, power: int, pp: int, effect_entries:", "int flinch_chance: int stat_chance: int def __init__( self, *, ailment: Dict[str, Any], category:", "in effect_entries ] self.effect_changes = [ AbilityEffectChange(**effect_change) for effect_change in effect_changes ] self.flavor_text_entries", "def __init__( self, *, flavor_text: str, language: Dict[str, Any], version_group: Dict[str, Any], )", "self.super = ContestComboDetail(**super) if super is not None else None class ContestComboDetail(Resource): use_before:", "= max_turns self.drain = drain self.healing = healing self.crit_rate = crit_rate self.ailment_chance =", "contest_effect: Url[\"ContestEffect\"] contest_type: MinimalResource[\"ContestType\"] damage_class: MinimalResource[\"MoveDamageClass\"] effect_chance: Optional[int] effect_changes: List[\"AbilityEffectChange\"] effect_entries: List[\"VerboseEffect\"] flavor_text_entries:", "= min_turns self.max_turns = max_turns self.drain = drain self.healing = healing self.crit_rate =", "-> None: self.accuracy = accuracy self.effect_chance = effect_chance self.power = power self.pp =", "def __init__( self, *, id: int, name: str, accuracy: int, contest_combos: Dict[str, Any],", "self.power = power self.pp = pp self.priority = priority self.stat_changes = [ MoveStatChange(**stat_change)", "Optional[List[Dict[str, Any]]], use_after: Optional[List[Dict[str, Any]]], ) -> None: self.use_before = ( [MinimalResource(**move) for", "from typing import TYPE_CHECKING, Any, Dict, List, Optional from aiopoke.objects.resources.contests.super_contest_effect import SuperContestEffect from", "__init__( self, *, normal: Optional[Dict[str, Any]], super: Optional[Dict[str, Any]], ) -> None: 
self.normal", "self.priority = priority self.stat_changes = [ MoveStatChange(**stat_change) for stat_change in stat_changes ] self.super_contest_effect", "= drain self.healing = healing self.crit_rate = crit_rate self.ailment_chance = ailment_chance self.flinch_chance =", "ailment: MinimalResource[\"MoveAilment\"] category: MinimalResource[\"MoveCategory\"] min_hits: int max_hits: int min_turns: int max_turns: int drain:", "ContestComboDetail(**super) if super is not None else None class ContestComboDetail(Resource): use_before: Optional[List[MinimalResource[\"Move\"]]] use_after:", "in effect_changes ] self.flavor_text_entries = [ MoveFlavorText(**flavor_text_entry) for flavor_text_entry in flavor_text_entries ] self.generation", "int, min_turns: int, max_turns: int, drain: int, healing: int, crit_rate: int, ailment_chance: int,", "contest_combos: \"ContestComboSets\" contest_effect: Url[\"ContestEffect\"] contest_type: MinimalResource[\"ContestType\"] damage_class: MinimalResource[\"MoveDamageClass\"] effect_chance: Optional[int] effect_changes: List[\"AbilityEffectChange\"] effect_entries:", "] self.flavor_text_entries = [ MoveFlavorText(**flavor_text_entry) for flavor_text_entry in flavor_text_entries ] self.generation = MinimalResource(**generation)", "meta: \"MoveMetaData\" names: List[\"Name\"] past_values: List[\"PastMoveStatValues\"] power: int pp: int priority: int stat_changes:", "stat: Dict[str, Any], ) -> None: self.change = change self.stat = MinimalResource(**stat) class", "Any], min_hits: int, max_hits: int, min_turns: int, max_turns: int, drain: int, healing: int,", "= min_hits self.max_hits = max_hits self.min_turns = min_turns self.max_turns = max_turns self.drain =", "stat: MinimalResource[\"Stat\"] def __init__( self, *, change: int, stat: Dict[str, Any], ) ->", "__init__( self, *, id: int, name: str, accuracy: int, contest_combos: Dict[str, Any], contest_effect:", "= power self.pp = pp self.priority = priority self.stat_changes = [ 
MoveStatChange(**stat_change) for", "str, language: Dict[str, Any], version_group: Dict[str, Any], ) -> None: self.flavor_text = flavor_text", "self.flinch_chance = flinch_chance self.stat_chance = stat_chance class MoveStatChange(Resource): change: int stat: MinimalResource[\"Stat\"] def", "List[Dict[str, Any]], flavor_text_entries: List[Dict[str, Any]], generation: Dict[str, Any], learned_by_pokemon: List[Dict[str, Any]], machines: List[Dict[str,", "NaturalGiftType, Pokemon, Stat, VersionGroup, ) from aiopoke.objects.resources.moves import ( MoveAilment, MoveCategory, MoveDamageClass, MoveTarget,", "List[Dict[str, Any]], meta: Dict[str, Any], names: List[Dict[str, Any]], past_values: List[Dict[str, Any]], power: int,", "if super is not None else None class ContestComboDetail(Resource): use_before: Optional[List[MinimalResource[\"Move\"]]] use_after: Optional[List[MinimalResource[\"Move\"]]]", "[ MoveFlavorText(**flavor_text_entry) for flavor_text_entry in flavor_text_entries ] self.generation = MinimalResource(**generation) self.learned_by_pokemon = [", "flinch_chance: int stat_chance: int def __init__( self, *, ailment: Dict[str, Any], category: Dict[str,", "Dict[str, Any], effect_chance: Optional[int], effect_entries: List[Dict[str, Any]], effect_changes: List[Dict[str, Any]], flavor_text_entries: List[Dict[str, Any]],", "MoveTarget, ) from aiopoke.objects.utility import Language class Move(NamedResource): accuracy: int contest_combos: \"ContestComboSets\" contest_effect:", "self.effect_chance = effect_chance self.effect_entries = [ VerboseEffect(**effect_entry) for effect_entry in effect_entries ] self.effect_changes", "Any], version_group: Dict[str, Any], ) -> None: self.flavor_text = flavor_text self.language = MinimalResource(**language)", "MinimalResource(**version_group) class MoveMetaData(Resource): ailment: MinimalResource[\"MoveAilment\"] category: MinimalResource[\"MoveCategory\"] min_hits: int max_hits: int min_turns: int", "= [ 
MinimalResource(**pokemon) for pokemon in learned_by_pokemon ] self.machines = [MachineVersionDetail(**machine) for machine", "damage_class: Dict[str, Any], effect_chance: Optional[int], effect_entries: List[Dict[str, Any]], effect_changes: List[Dict[str, Any]], flavor_text_entries: List[Dict[str,", "self.effect_entries = [ VerboseEffect(**effect_entry) for effect_entry in effect_entries ] self.effect_changes = [ AbilityEffectChange(**effect_change)", "for effect_entry in effect_entries ] self.effect_changes = [ AbilityEffectChange(**effect_change) for effect_change in effect_changes", "Dict[str, Any], ) -> None: self.change = change self.stat = MinimalResource(**stat) class PastMoveStatValues(Resource):", "ContestType, Generation, NaturalGiftType, Pokemon, Stat, VersionGroup, ) from aiopoke.objects.resources.moves import ( MoveAilment, MoveCategory,", "max_hits: int min_turns: int max_turns: int drain: int healing: int crit_rate: int ailment_chance:", "flavor_text: str, language: Dict[str, Any], version_group: Dict[str, Any], ) -> None: self.flavor_text =", "= ContestComboSets(**contest_combos) self.contest_effect = Url(**contest_effect) self.contest_type = MinimalResource(**contest_type) self.damage_class = MinimalResource(**damage_class) self.effect_chance =", "self.contest_combos = ContestComboSets(**contest_combos) self.contest_effect = Url(**contest_effect) self.contest_type = MinimalResource(**contest_type) self.damage_class = MinimalResource(**damage_class) self.effect_chance", "class Move(NamedResource): accuracy: int contest_combos: \"ContestComboSets\" contest_effect: Url[\"ContestEffect\"] contest_type: MinimalResource[\"ContestType\"] damage_class: MinimalResource[\"MoveDamageClass\"] effect_chance:", "self.stat_changes = [ MoveStatChange(**stat_change) for stat_change in stat_changes ] self.super_contest_effect = Url(**super_contest_effect) self.target", "None: self.use_before = ( [MinimalResource(**move) for move in use_before] if use_before is not", 
"MinimalResource[\"VersionGroup\"] def __init__( self, *, flavor_text: str, language: Dict[str, Any], version_group: Dict[str, Any],", "Any]], power: int, pp: int, priority: int, stat_changes: List[Dict[str, Any]], super_contest_effect: Dict[str, Any],", "MoveCategory, MoveDamageClass, MoveTarget, ) from aiopoke.objects.utility import Language class Move(NamedResource): accuracy: int contest_combos:", "= Url(**super_contest_effect) self.target = MinimalResource(**target) self.type = MinimalResource(**type) class ContestComboSets(Resource): normal: Optional[\"ContestComboDetail\"] super:", "None: super().__init__(id=id, name=name) self.accuracy = accuracy self.contest_combos = ContestComboSets(**contest_combos) self.contest_effect = Url(**contest_effect) self.contest_type", "meta: Dict[str, Any], names: List[Dict[str, Any]], past_values: List[Dict[str, Any]], power: int, pp: int,", "power: int, pp: int, effect_entries: List[Dict[str, Any]], type: Dict[str, Any], version_group: Dict[str, Any],", "int pp: int priority: int stat_changes: List[\"MoveStatChange\"] super_contest_effect: Url[\"SuperContestEffect\"] target: MinimalResource[\"MoveTarget\"] type: MinimalResource[\"NaturalGiftType\"]", "pp: int priority: int stat_changes: List[\"MoveStatChange\"] super_contest_effect: Url[\"SuperContestEffect\"] target: MinimalResource[\"MoveTarget\"] type: MinimalResource[\"NaturalGiftType\"] def", "Move(NamedResource): accuracy: int contest_combos: \"ContestComboSets\" contest_effect: Url[\"ContestEffect\"] contest_type: MinimalResource[\"ContestType\"] damage_class: MinimalResource[\"MoveDamageClass\"] effect_chance: Optional[int]", "List[Dict[str, Any]], effect_changes: List[Dict[str, Any]], flavor_text_entries: List[Dict[str, Any]], generation: Dict[str, Any], learned_by_pokemon: List[Dict[str,", "class MoveStatChange(Resource): change: int stat: MinimalResource[\"Stat\"] def __init__( self, *, change: int, stat:", "class MoveMetaData(Resource): ailment: 
MinimalResource[\"MoveAilment\"] category: MinimalResource[\"MoveCategory\"] min_hits: int max_hits: int min_turns: int max_turns:", "self.category = MinimalResource(**category) self.min_hits = min_hits self.max_hits = max_hits self.min_turns = min_turns self.max_turns", "effect_entry in effect_entries ] self.effect_changes = [ AbilityEffectChange(**effect_change) for effect_change in effect_changes ]", "MinimalResource[\"NaturalGiftType\"] version_group: MinimalResource[\"VersionGroup\"] def __init__( self, *, accuracy: int, effect_chance: int, power: int,", "def __init__( self, *, change: int, stat: Dict[str, Any], ) -> None: self.change", "self.stat_chance = stat_chance class MoveStatChange(Resource): change: int stat: MinimalResource[\"Stat\"] def __init__( self, *,", "Dict[str, Any], category: Dict[str, Any], min_hits: int, max_hits: int, min_turns: int, max_turns: int,", "from aiopoke.objects.resources.pokemon.ability import AbilityEffectChange from aiopoke.objects.utility import ( MachineVersionDetail, Name, NamedResource, VerboseEffect, )", "Optional[Dict[str, Any]], super: Optional[Dict[str, Any]], ) -> None: self.normal = ContestComboDetail(**normal) if normal", "in past_values ] self.power = power self.pp = pp self.priority = priority self.stat_changes", "Name, NamedResource, VerboseEffect, ) from aiopoke.utils.minimal_resources import MinimalResource, Url from aiopoke.utils.resource import Resource", "Dict, List, Optional from aiopoke.objects.resources.contests.super_contest_effect import SuperContestEffect from aiopoke.objects.resources.pokemon.ability import AbilityEffectChange from aiopoke.objects.utility", "int def __init__( self, *, ailment: Dict[str, Any], category: Dict[str, Any], min_hits: int,", "normal: Optional[Dict[str, Any]], super: Optional[Dict[str, Any]], ) -> None: self.normal = ContestComboDetail(**normal) if", "int min_turns: int max_turns: int drain: int healing: int crit_rate: int ailment_chance: int", "type: 
MinimalResource[\"NaturalGiftType\"] version_group: MinimalResource[\"VersionGroup\"] def __init__( self, *, accuracy: int, effect_chance: int, power:", "self.flavor_text_entries = [ MoveFlavorText(**flavor_text_entry) for flavor_text_entry in flavor_text_entries ] self.generation = MinimalResource(**generation) self.learned_by_pokemon", "List[\"VerboseEffect\"] flavor_text_entries: List[\"MoveFlavorText\"] generation: MinimalResource[\"Generation\"] learned_by_pokemon: List[MinimalResource[\"Pokemon\"]] machines: List[\"MachineVersionDetail\"] meta: \"MoveMetaData\" names: List[\"Name\"]", "type: MinimalResource[\"NaturalGiftType\"] def __init__( self, *, id: int, name: str, accuracy: int, contest_combos:", "min_turns self.max_turns = max_turns self.drain = drain self.healing = healing self.crit_rate = crit_rate", "Any]], machines: List[Dict[str, Any]], meta: Dict[str, Any], names: List[Dict[str, Any]], past_values: List[Dict[str, Any]],", "use_before: Optional[List[Dict[str, Any]]], use_after: Optional[List[Dict[str, Any]]], ) -> None: self.use_before = ( [MinimalResource(**move)", "Any], ) -> None: self.accuracy = accuracy self.effect_chance = effect_chance self.power = power", "import ( ContestEffect, ContestType, Generation, NaturalGiftType, Pokemon, Stat, VersionGroup, ) from aiopoke.objects.resources.moves import", "super is not None else None class ContestComboDetail(Resource): use_before: Optional[List[MinimalResource[\"Move\"]]] use_after: Optional[List[MinimalResource[\"Move\"]]] def", "-> None: self.change = change self.stat = MinimalResource(**stat) class PastMoveStatValues(Resource): accuracy: int effect_chance:", "Url(**super_contest_effect) self.target = MinimalResource(**target) self.type = MinimalResource(**type) class ContestComboSets(Resource): normal: Optional[\"ContestComboDetail\"] super: Optional[\"ContestComboDetail\"]", "int, stat_chance: int, ) -> None: self.ailment = MinimalResource(**ailment) self.category = MinimalResource(**category) 
self.min_hits", "Any]]], use_after: Optional[List[Dict[str, Any]]], ) -> None: self.use_before = ( [MinimalResource(**move) for move", "self.flavor_text = flavor_text self.language = MinimalResource(**language) self.version_group = MinimalResource(**version_group) class MoveMetaData(Resource): ailment: MinimalResource[\"MoveAilment\"]", "= pp self.effect_entries = [ VerboseEffect(**effect_entry) for effect_entry in effect_entries ] self.type =", "self.type = MinimalResource(**type) class ContestComboSets(Resource): normal: Optional[\"ContestComboDetail\"] super: Optional[\"ContestComboDetail\"] def __init__( self, *,", "type: Dict[str, Any], version_group: Dict[str, Any], ) -> None: self.accuracy = accuracy self.effect_chance", "*, change: int, stat: Dict[str, Any], ) -> None: self.change = change self.stat", "str language: MinimalResource[\"Language\"] version_group: MinimalResource[\"VersionGroup\"] def __init__( self, *, flavor_text: str, language: Dict[str,", "self.effect_entries = [ VerboseEffect(**effect_entry) for effect_entry in effect_entries ] self.type = MinimalResource(**type) self.version_group", "= [ AbilityEffectChange(**effect_change) for effect_change in effect_changes ] self.flavor_text_entries = [ MoveFlavorText(**flavor_text_entry) for", "( [MinimalResource(**move) for move in use_after] if use_after is not None else None", "self, *, ailment: Dict[str, Any], category: Dict[str, Any], min_hits: int, max_hits: int, min_turns:", "damage_class: MinimalResource[\"MoveDamageClass\"] effect_chance: Optional[int] effect_changes: List[\"AbilityEffectChange\"] effect_entries: List[\"VerboseEffect\"] flavor_text_entries: List[\"MoveFlavorText\"] generation: MinimalResource[\"Generation\"] learned_by_pokemon:", "MinimalResource[\"NaturalGiftType\"] def __init__( self, *, id: int, name: str, accuracy: int, contest_combos: Dict[str,", "id: int, name: str, accuracy: int, contest_combos: Dict[str, Any], contest_effect: Dict[str, Any], contest_type:", "None: 
self.normal = ContestComboDetail(**normal) if normal is not None else None self.super =", "= power self.pp = pp self.effect_entries = [ VerboseEffect(**effect_entry) for effect_entry in effect_entries", "self.use_before = ( [MinimalResource(**move) for move in use_before] if use_before is not None", "self.accuracy = accuracy self.contest_combos = ContestComboSets(**contest_combos) self.contest_effect = Url(**contest_effect) self.contest_type = MinimalResource(**contest_type) self.damage_class", "= flinch_chance self.stat_chance = stat_chance class MoveStatChange(Resource): change: int stat: MinimalResource[\"Stat\"] def __init__(", "machine in machines] self.meta = MoveMetaData(**meta) self.names = [Name(**name) for name in names]", "PastMoveStatValues(**past_value) for past_value in past_values ] self.power = power self.pp = pp self.priority", "machines: List[Dict[str, Any]], meta: Dict[str, Any], names: List[Dict[str, Any]], past_values: List[Dict[str, Any]], power:", "Optional from aiopoke.objects.resources.contests.super_contest_effect import SuperContestEffect from aiopoke.objects.resources.pokemon.ability import AbilityEffectChange from aiopoke.objects.utility import (", "Dict[str, Any], type: Dict[str, Any], ) -> None: super().__init__(id=id, name=name) self.accuracy = accuracy", "in use_after] if use_after is not None else None ) class MoveFlavorText(Resource): flavor_text:", "max_turns: int, drain: int, healing: int, crit_rate: int, ailment_chance: int, flinch_chance: int, stat_chance:", "import ( MoveAilment, MoveCategory, MoveDamageClass, MoveTarget, ) from aiopoke.objects.utility import Language class Move(NamedResource):", "in machines] self.meta = MoveMetaData(**meta) self.names = [Name(**name) for name in names] self.past_values", "VerboseEffect, ) from aiopoke.utils.minimal_resources import MinimalResource, Url from aiopoke.utils.resource import Resource if TYPE_CHECKING:", "name=name) self.accuracy = accuracy self.contest_combos = 
ContestComboSets(**contest_combos) self.contest_effect = Url(**contest_effect) self.contest_type = MinimalResource(**contest_type)", "effect_chance: Optional[int], effect_entries: List[Dict[str, Any]], effect_changes: List[Dict[str, Any]], flavor_text_entries: List[Dict[str, Any]], generation: Dict[str,", "drain self.healing = healing self.crit_rate = crit_rate self.ailment_chance = ailment_chance self.flinch_chance = flinch_chance", "None class ContestComboDetail(Resource): use_before: Optional[List[MinimalResource[\"Move\"]]] use_after: Optional[List[MinimalResource[\"Move\"]]] def __init__( self, *, use_before: Optional[List[Dict[str,", "move in use_after] if use_after is not None else None ) class MoveFlavorText(Resource):", "Any, Dict, List, Optional from aiopoke.objects.resources.contests.super_contest_effect import SuperContestEffect from aiopoke.objects.resources.pokemon.ability import AbilityEffectChange from", ") -> None: self.ailment = MinimalResource(**ailment) self.category = MinimalResource(**category) self.min_hits = min_hits self.max_hits", "contest_type: MinimalResource[\"ContestType\"] damage_class: MinimalResource[\"MoveDamageClass\"] effect_chance: Optional[int] effect_changes: List[\"AbilityEffectChange\"] effect_entries: List[\"VerboseEffect\"] flavor_text_entries: List[\"MoveFlavorText\"] generation:", "self, *, accuracy: int, effect_chance: int, power: int, pp: int, effect_entries: List[Dict[str, Any]],", "else None class ContestComboDetail(Resource): use_before: Optional[List[MinimalResource[\"Move\"]]] use_after: Optional[List[MinimalResource[\"Move\"]]] def __init__( self, *, use_before:", "learned_by_pokemon: List[MinimalResource[\"Pokemon\"]] machines: List[\"MachineVersionDetail\"] meta: \"MoveMetaData\" names: List[\"Name\"] past_values: List[\"PastMoveStatValues\"] power: int pp:", "MoveFlavorText(Resource): flavor_text: str language: MinimalResource[\"Language\"] version_group: MinimalResource[\"VersionGroup\"] def __init__( self, 
*, flavor_text: str,", "import MinimalResource, Url from aiopoke.utils.resource import Resource if TYPE_CHECKING: from aiopoke.objects.resources import (", "min_hits: int max_hits: int min_turns: int max_turns: int drain: int healing: int crit_rate:", "target: Dict[str, Any], type: Dict[str, Any], ) -> None: super().__init__(id=id, name=name) self.accuracy =", "if TYPE_CHECKING: from aiopoke.objects.resources import ( ContestEffect, ContestType, Generation, NaturalGiftType, Pokemon, Stat, VersionGroup,", "for move in use_after] if use_after is not None else None ) class", "ailment_chance self.flinch_chance = flinch_chance self.stat_chance = stat_chance class MoveStatChange(Resource): change: int stat: MinimalResource[\"Stat\"]", "self, *, change: int, stat: Dict[str, Any], ) -> None: self.change = change", "self.accuracy = accuracy self.effect_chance = effect_chance self.power = power self.pp = pp self.effect_entries", "MinimalResource(**damage_class) self.effect_chance = effect_chance self.effect_entries = [ VerboseEffect(**effect_entry) for effect_entry in effect_entries ]", "self, *, use_before: Optional[List[Dict[str, Any]]], use_after: Optional[List[Dict[str, Any]]], ) -> None: self.use_before =", "List[\"MoveFlavorText\"] generation: MinimalResource[\"Generation\"] learned_by_pokemon: List[MinimalResource[\"Pokemon\"]] machines: List[\"MachineVersionDetail\"] meta: \"MoveMetaData\" names: List[\"Name\"] past_values: List[\"PastMoveStatValues\"]", "MinimalResource[\"Generation\"] learned_by_pokemon: List[MinimalResource[\"Pokemon\"]] machines: List[\"MachineVersionDetail\"] meta: \"MoveMetaData\" names: List[\"Name\"] past_values: List[\"PastMoveStatValues\"] power: int", "Language class Move(NamedResource): accuracy: int contest_combos: \"ContestComboSets\" contest_effect: Url[\"ContestEffect\"] contest_type: MinimalResource[\"ContestType\"] damage_class: MinimalResource[\"MoveDamageClass\"]", "self, *, normal: Optional[Dict[str, Any]], super: 
Optional[Dict[str, Any]], ) -> None: self.normal =", "\"MoveMetaData\" names: List[\"Name\"] past_values: List[\"PastMoveStatValues\"] power: int pp: int priority: int stat_changes: List[\"MoveStatChange\"]", "for past_value in past_values ] self.power = power self.pp = pp self.priority =", "change: int, stat: Dict[str, Any], ) -> None: self.change = change self.stat =", "int priority: int stat_changes: List[\"MoveStatChange\"] super_contest_effect: Url[\"SuperContestEffect\"] target: MinimalResource[\"MoveTarget\"] type: MinimalResource[\"NaturalGiftType\"] def __init__(", "= MinimalResource(**contest_type) self.damage_class = MinimalResource(**damage_class) self.effect_chance = effect_chance self.effect_entries = [ VerboseEffect(**effect_entry) for", "MachineVersionDetail, Name, NamedResource, VerboseEffect, ) from aiopoke.utils.minimal_resources import MinimalResource, Url from aiopoke.utils.resource import", "self.contest_effect = Url(**contest_effect) self.contest_type = MinimalResource(**contest_type) self.damage_class = MinimalResource(**damage_class) self.effect_chance = effect_chance self.effect_entries", "crit_rate self.ailment_chance = ailment_chance self.flinch_chance = flinch_chance self.stat_chance = stat_chance class MoveStatChange(Resource): change:", "self.version_group = MinimalResource(**version_group) class MoveMetaData(Resource): ailment: MinimalResource[\"MoveAilment\"] category: MinimalResource[\"MoveCategory\"] min_hits: int max_hits: int", "Optional[Dict[str, Any]], ) -> None: self.normal = ContestComboDetail(**normal) if normal is not None", "__init__( self, *, change: int, stat: Dict[str, Any], ) -> None: self.change =", "__init__( self, *, accuracy: int, effect_chance: int, power: int, pp: int, effect_entries: List[Dict[str,", "int, ailment_chance: int, flinch_chance: int, stat_chance: int, ) -> None: self.ailment = MinimalResource(**ailment)", "self.past_values = [ PastMoveStatValues(**past_value) for past_value in past_values ] 
self.power = power self.pp", "MoveFlavorText(**flavor_text_entry) for flavor_text_entry in flavor_text_entries ] self.generation = MinimalResource(**generation) self.learned_by_pokemon = [ MinimalResource(**pokemon)", "def __init__( self, *, ailment: Dict[str, Any], category: Dict[str, Any], min_hits: int, max_hits:", "= MinimalResource(**language) self.version_group = MinimalResource(**version_group) class MoveMetaData(Resource): ailment: MinimalResource[\"MoveAilment\"] category: MinimalResource[\"MoveCategory\"] min_hits: int", "stat_chance class MoveStatChange(Resource): change: int stat: MinimalResource[\"Stat\"] def __init__( self, *, change: int,", "List[\"Name\"] past_values: List[\"PastMoveStatValues\"] power: int pp: int priority: int stat_changes: List[\"MoveStatChange\"] super_contest_effect: Url[\"SuperContestEffect\"]", "int stat: MinimalResource[\"Stat\"] def __init__( self, *, change: int, stat: Dict[str, Any], )", "for flavor_text_entry in flavor_text_entries ] self.generation = MinimalResource(**generation) self.learned_by_pokemon = [ MinimalResource(**pokemon) for", "Optional[List[MinimalResource[\"Move\"]]] def __init__( self, *, use_before: Optional[List[Dict[str, Any]]], use_after: Optional[List[Dict[str, Any]]], ) ->", "aiopoke.utils.minimal_resources import MinimalResource, Url from aiopoke.utils.resource import Resource if TYPE_CHECKING: from aiopoke.objects.resources import", "drain: int, healing: int, crit_rate: int, ailment_chance: int, flinch_chance: int, stat_chance: int, )", "category: MinimalResource[\"MoveCategory\"] min_hits: int max_hits: int min_turns: int max_turns: int drain: int healing:", "= ContestComboDetail(**normal) if normal is not None else None self.super = ContestComboDetail(**super) if", "generation: MinimalResource[\"Generation\"] learned_by_pokemon: List[MinimalResource[\"Pokemon\"]] machines: List[\"MachineVersionDetail\"] meta: \"MoveMetaData\" names: List[\"Name\"] past_values: List[\"PastMoveStatValues\"] 
power:", "self.target = MinimalResource(**target) self.type = MinimalResource(**type) class ContestComboSets(Resource): normal: Optional[\"ContestComboDetail\"] super: Optional[\"ContestComboDetail\"] def", "import Resource if TYPE_CHECKING: from aiopoke.objects.resources import ( ContestEffect, ContestType, Generation, NaturalGiftType, Pokemon,", "[ VerboseEffect(**effect_entry) for effect_entry in effect_entries ] self.effect_changes = [ AbilityEffectChange(**effect_change) for effect_change", "from aiopoke.objects.resources.moves import ( MoveAilment, MoveCategory, MoveDamageClass, MoveTarget, ) from aiopoke.objects.utility import Language", "not None else None ) self.use_after = ( [MinimalResource(**move) for move in use_after]", "MinimalResource(**ailment) self.category = MinimalResource(**category) self.min_hits = min_hits self.max_hits = max_hits self.min_turns = min_turns", "int, ) -> None: self.ailment = MinimalResource(**ailment) self.category = MinimalResource(**category) self.min_hits = min_hits", "= ailment_chance self.flinch_chance = flinch_chance self.stat_chance = stat_chance class MoveStatChange(Resource): change: int stat:", "accuracy: int, effect_chance: int, power: int, pp: int, effect_entries: List[Dict[str, Any]], type: Dict[str,", "self.normal = ContestComboDetail(**normal) if normal is not None else None self.super = ContestComboDetail(**super)", "MinimalResource(**generation) self.learned_by_pokemon = [ MinimalResource(**pokemon) for pokemon in learned_by_pokemon ] self.machines = [MachineVersionDetail(**machine)", "effect_changes: List[Dict[str, Any]], flavor_text_entries: List[Dict[str, Any]], generation: Dict[str, Any], learned_by_pokemon: List[Dict[str, Any]], machines:", "Any], type: Dict[str, Any], ) -> None: super().__init__(id=id, name=name) self.accuracy = accuracy self.contest_combos", "int, effect_entries: List[Dict[str, Any]], type: Dict[str, Any], version_group: Dict[str, Any], ) -> None:", "super: Optional[Dict[str, Any]], ) 
-> None: self.normal = ContestComboDetail(**normal) if normal is not", "( MoveAilment, MoveCategory, MoveDamageClass, MoveTarget, ) from aiopoke.objects.utility import Language class Move(NamedResource): accuracy:", "aiopoke.objects.utility import Language class Move(NamedResource): accuracy: int contest_combos: \"ContestComboSets\" contest_effect: Url[\"ContestEffect\"] contest_type: MinimalResource[\"ContestType\"]", "TYPE_CHECKING, Any, Dict, List, Optional from aiopoke.objects.resources.contests.super_contest_effect import SuperContestEffect from aiopoke.objects.resources.pokemon.ability import AbilityEffectChange", "*, ailment: Dict[str, Any], category: Dict[str, Any], min_hits: int, max_hits: int, min_turns: int,", "import Language class Move(NamedResource): accuracy: int contest_combos: \"ContestComboSets\" contest_effect: Url[\"ContestEffect\"] contest_type: MinimalResource[\"ContestType\"] damage_class:", "self.ailment_chance = ailment_chance self.flinch_chance = flinch_chance self.stat_chance = stat_chance class MoveStatChange(Resource): change: int", "pp: int effect_entries: List[\"VerboseEffect\"] type: MinimalResource[\"NaturalGiftType\"] version_group: MinimalResource[\"VersionGroup\"] def __init__( self, *, accuracy:", "import ( MachineVersionDetail, Name, NamedResource, VerboseEffect, ) from aiopoke.utils.minimal_resources import MinimalResource, Url from", "TYPE_CHECKING: from aiopoke.objects.resources import ( ContestEffect, ContestType, Generation, NaturalGiftType, Pokemon, Stat, VersionGroup, )", "= ( [MinimalResource(**move) for move in use_before] if use_before is not None else", "int max_hits: int min_turns: int max_turns: int drain: int healing: int crit_rate: int", "in flavor_text_entries ] self.generation = MinimalResource(**generation) self.learned_by_pokemon = [ MinimalResource(**pokemon) for pokemon in", "self.pp = pp self.effect_entries = [ VerboseEffect(**effect_entry) for effect_entry in effect_entries ] self.type", "MoveAilment, 
MoveCategory, MoveDamageClass, MoveTarget, ) from aiopoke.objects.utility import Language class Move(NamedResource): accuracy: int", "[ MoveStatChange(**stat_change) for stat_change in stat_changes ] self.super_contest_effect = Url(**super_contest_effect) self.target = MinimalResource(**target)", "Any], damage_class: Dict[str, Any], effect_chance: Optional[int], effect_entries: List[Dict[str, Any]], effect_changes: List[Dict[str, Any]], flavor_text_entries:", "MinimalResource[\"MoveTarget\"] type: MinimalResource[\"NaturalGiftType\"] def __init__( self, *, id: int, name: str, accuracy: int,", "int, pp: int, effect_entries: List[Dict[str, Any]], type: Dict[str, Any], version_group: Dict[str, Any], )", "int, drain: int, healing: int, crit_rate: int, ailment_chance: int, flinch_chance: int, stat_chance: int,", "= MinimalResource(**category) self.min_hits = min_hits self.max_hits = max_hits self.min_turns = min_turns self.max_turns =", "flinch_chance self.stat_chance = stat_chance class MoveStatChange(Resource): change: int stat: MinimalResource[\"Stat\"] def __init__( self,", "None: self.ailment = MinimalResource(**ailment) self.category = MinimalResource(**category) self.min_hits = min_hits self.max_hits = max_hits", "ContestComboDetail(Resource): use_before: Optional[List[MinimalResource[\"Move\"]]] use_after: Optional[List[MinimalResource[\"Move\"]]] def __init__( self, *, use_before: Optional[List[Dict[str, Any]]], use_after:", "*, accuracy: int, effect_chance: int, power: int, pp: int, effect_entries: List[Dict[str, Any]], type:", "power: int pp: int priority: int stat_changes: List[\"MoveStatChange\"] super_contest_effect: Url[\"SuperContestEffect\"] target: MinimalResource[\"MoveTarget\"] type:", "from aiopoke.objects.resources import ( ContestEffect, ContestType, Generation, NaturalGiftType, Pokemon, Stat, VersionGroup, ) from", "for stat_change in stat_changes ] self.super_contest_effect = Url(**super_contest_effect) self.target = MinimalResource(**target) 
self.type =", "None: self.accuracy = accuracy self.effect_chance = effect_chance self.power = power self.pp = pp", "class ContestComboSets(Resource): normal: Optional[\"ContestComboDetail\"] super: Optional[\"ContestComboDetail\"] def __init__( self, *, normal: Optional[Dict[str, Any]],", "aiopoke.objects.resources.pokemon.ability import AbilityEffectChange from aiopoke.objects.utility import ( MachineVersionDetail, Name, NamedResource, VerboseEffect, ) from", "= [ PastMoveStatValues(**past_value) for past_value in past_values ] self.power = power self.pp =", "Optional[\"ContestComboDetail\"] def __init__( self, *, normal: Optional[Dict[str, Any]], super: Optional[Dict[str, Any]], ) ->", "MinimalResource[\"MoveCategory\"] min_hits: int max_hits: int min_turns: int max_turns: int drain: int healing: int", "None else None class ContestComboDetail(Resource): use_before: Optional[List[MinimalResource[\"Move\"]]] use_after: Optional[List[MinimalResource[\"Move\"]]] def __init__( self, *,", ") from aiopoke.objects.resources.moves import ( MoveAilment, MoveCategory, MoveDamageClass, MoveTarget, ) from aiopoke.objects.utility import", "from aiopoke.utils.resource import Resource if TYPE_CHECKING: from aiopoke.objects.resources import ( ContestEffect, ContestType, Generation,", "Pokemon, Stat, VersionGroup, ) from aiopoke.objects.resources.moves import ( MoveAilment, MoveCategory, MoveDamageClass, MoveTarget, )", "from aiopoke.objects.utility import Language class Move(NamedResource): accuracy: int contest_combos: \"ContestComboSets\" contest_effect: Url[\"ContestEffect\"] contest_type:", "name: str, accuracy: int, contest_combos: Dict[str, Any], contest_effect: Dict[str, Any], contest_type: Dict[str, Any],", "SuperContestEffect from aiopoke.objects.resources.pokemon.ability import AbilityEffectChange from aiopoke.objects.utility import ( MachineVersionDetail, Name, NamedResource, VerboseEffect,", "flavor_text_entries: List[\"MoveFlavorText\"] generation: 
MinimalResource[\"Generation\"] learned_by_pokemon: List[MinimalResource[\"Pokemon\"]] machines: List[\"MachineVersionDetail\"] meta: \"MoveMetaData\" names: List[\"Name\"] past_values:", "flavor_text_entries ] self.generation = MinimalResource(**generation) self.learned_by_pokemon = [ MinimalResource(**pokemon) for pokemon in learned_by_pokemon", "int, priority: int, stat_changes: List[Dict[str, Any]], super_contest_effect: Dict[str, Any], target: Dict[str, Any], type:", "stat_changes: List[\"MoveStatChange\"] super_contest_effect: Url[\"SuperContestEffect\"] target: MinimalResource[\"MoveTarget\"] type: MinimalResource[\"NaturalGiftType\"] def __init__( self, *, id:", "] self.effect_changes = [ AbilityEffectChange(**effect_change) for effect_change in effect_changes ] self.flavor_text_entries = [", "= max_hits self.min_turns = min_turns self.max_turns = max_turns self.drain = drain self.healing =", "VerboseEffect(**effect_entry) for effect_entry in effect_entries ] self.effect_changes = [ AbilityEffectChange(**effect_change) for effect_change in", "self.change = change self.stat = MinimalResource(**stat) class PastMoveStatValues(Resource): accuracy: int effect_chance: int power:", "= MinimalResource(**damage_class) self.effect_chance = effect_chance self.effect_entries = [ VerboseEffect(**effect_entry) for effect_entry in effect_entries", "import SuperContestEffect from aiopoke.objects.resources.pokemon.ability import AbilityEffectChange from aiopoke.objects.utility import ( MachineVersionDetail, Name, NamedResource,", "effect_changes ] self.flavor_text_entries = [ MoveFlavorText(**flavor_text_entry) for flavor_text_entry in flavor_text_entries ] self.generation =", "] self.super_contest_effect = Url(**super_contest_effect) self.target = MinimalResource(**target) self.type = MinimalResource(**type) class ContestComboSets(Resource): normal:", "machines] self.meta = MoveMetaData(**meta) self.names = [Name(**name) for name in names] self.past_values =", 
"List[Dict[str, Any]], past_values: List[Dict[str, Any]], power: int, pp: int, priority: int, stat_changes: List[Dict[str,", "= change self.stat = MinimalResource(**stat) class PastMoveStatValues(Resource): accuracy: int effect_chance: int power: int", "for move in use_before] if use_before is not None else None ) self.use_after", "Any], target: Dict[str, Any], type: Dict[str, Any], ) -> None: super().__init__(id=id, name=name) self.accuracy", "= crit_rate self.ailment_chance = ailment_chance self.flinch_chance = flinch_chance self.stat_chance = stat_chance class MoveStatChange(Resource):", "typing import TYPE_CHECKING, Any, Dict, List, Optional from aiopoke.objects.resources.contests.super_contest_effect import SuperContestEffect from aiopoke.objects.resources.pokemon.ability", ") -> None: self.accuracy = accuracy self.effect_chance = effect_chance self.power = power self.pp", "aiopoke.objects.resources import ( ContestEffect, ContestType, Generation, NaturalGiftType, Pokemon, Stat, VersionGroup, ) from aiopoke.objects.resources.moves", "int, name: str, accuracy: int, contest_combos: Dict[str, Any], contest_effect: Dict[str, Any], contest_type: Dict[str,", ") self.use_after = ( [MinimalResource(**move) for move in use_after] if use_after is not", ") class MoveFlavorText(Resource): flavor_text: str language: MinimalResource[\"Language\"] version_group: MinimalResource[\"VersionGroup\"] def __init__( self, *,", "effect_change in effect_changes ] self.flavor_text_entries = [ MoveFlavorText(**flavor_text_entry) for flavor_text_entry in flavor_text_entries ]", "ContestComboDetail(**normal) if normal is not None else None self.super = ContestComboDetail(**super) if super", "pp self.priority = priority self.stat_changes = [ MoveStatChange(**stat_change) for stat_change in stat_changes ]", "Url[\"SuperContestEffect\"] target: MinimalResource[\"MoveTarget\"] type: MinimalResource[\"NaturalGiftType\"] def __init__( self, *, id: int, name: str,", "Any], names: List[Dict[str, 
Any]], past_values: List[Dict[str, Any]], power: int, pp: int, priority: int,", "self.contest_type = MinimalResource(**contest_type) self.damage_class = MinimalResource(**damage_class) self.effect_chance = effect_chance self.effect_entries = [ VerboseEffect(**effect_entry)", "Any], contest_effect: Dict[str, Any], contest_type: Dict[str, Any], damage_class: Dict[str, Any], effect_chance: Optional[int], effect_entries:", "stat_chance: int def __init__( self, *, ailment: Dict[str, Any], category: Dict[str, Any], min_hits:", "effect_entries: List[Dict[str, Any]], type: Dict[str, Any], version_group: Dict[str, Any], ) -> None: self.accuracy", "super().__init__(id=id, name=name) self.accuracy = accuracy self.contest_combos = ContestComboSets(**contest_combos) self.contest_effect = Url(**contest_effect) self.contest_type =", "Any]], past_values: List[Dict[str, Any]], power: int, pp: int, priority: int, stat_changes: List[Dict[str, Any]],", "max_hits: int, min_turns: int, max_turns: int, drain: int, healing: int, crit_rate: int, ailment_chance:", "Dict[str, Any], min_hits: int, max_hits: int, min_turns: int, max_turns: int, drain: int, healing:", "power: int pp: int effect_entries: List[\"VerboseEffect\"] type: MinimalResource[\"NaturalGiftType\"] version_group: MinimalResource[\"VersionGroup\"] def __init__( self,", "Optional[int], effect_entries: List[Dict[str, Any]], effect_changes: List[Dict[str, Any]], flavor_text_entries: List[Dict[str, Any]], generation: Dict[str, Any],", "Any]], ) -> None: self.normal = ContestComboDetail(**normal) if normal is not None else", "use_before] if use_before is not None else None ) self.use_after = ( [MinimalResource(**move)", "past_value in past_values ] self.power = power self.pp = pp self.priority = priority", "version_group: MinimalResource[\"VersionGroup\"] def __init__( self, *, flavor_text: str, language: Dict[str, Any], version_group: Dict[str,", "class ContestComboDetail(Resource): use_before: 
Optional[List[MinimalResource[\"Move\"]]] use_after: Optional[List[MinimalResource[\"Move\"]]] def __init__( self, *, use_before: Optional[List[Dict[str, Any]]],", "int pp: int effect_entries: List[\"VerboseEffect\"] type: MinimalResource[\"NaturalGiftType\"] version_group: MinimalResource[\"VersionGroup\"] def __init__( self, *,", "= accuracy self.contest_combos = ContestComboSets(**contest_combos) self.contest_effect = Url(**contest_effect) self.contest_type = MinimalResource(**contest_type) self.damage_class =", "accuracy: int, contest_combos: Dict[str, Any], contest_effect: Dict[str, Any], contest_type: Dict[str, Any], damage_class: Dict[str,", "super_contest_effect: Url[\"SuperContestEffect\"] target: MinimalResource[\"MoveTarget\"] type: MinimalResource[\"NaturalGiftType\"] def __init__( self, *, id: int, name:", "use_before is not None else None ) self.use_after = ( [MinimalResource(**move) for move", "None else None self.super = ContestComboDetail(**super) if super is not None else None", "flinch_chance: int, stat_chance: int, ) -> None: self.ailment = MinimalResource(**ailment) self.category = MinimalResource(**category)", "not None else None ) class MoveFlavorText(Resource): flavor_text: str language: MinimalResource[\"Language\"] version_group: MinimalResource[\"VersionGroup\"]", "self.healing = healing self.crit_rate = crit_rate self.ailment_chance = ailment_chance self.flinch_chance = flinch_chance self.stat_chance", "else None ) class MoveFlavorText(Resource): flavor_text: str language: MinimalResource[\"Language\"] version_group: MinimalResource[\"VersionGroup\"] def __init__(", "ailment_chance: int, flinch_chance: int, stat_chance: int, ) -> None: self.ailment = MinimalResource(**ailment) self.category", "effect_entries: List[\"VerboseEffect\"] type: MinimalResource[\"NaturalGiftType\"] version_group: MinimalResource[\"VersionGroup\"] def __init__( self, *, accuracy: int, effect_chance:", "self.machines = [MachineVersionDetail(**machine) for machine 
in machines] self.meta = MoveMetaData(**meta) self.names = [Name(**name)", "Dict[str, Any], version_group: Dict[str, Any], ) -> None: self.flavor_text = flavor_text self.language =", "-> None: self.use_before = ( [MinimalResource(**move) for move in use_before] if use_before is", "AbilityEffectChange from aiopoke.objects.utility import ( MachineVersionDetail, Name, NamedResource, VerboseEffect, ) from aiopoke.utils.minimal_resources import", "self.max_turns = max_turns self.drain = drain self.healing = healing self.crit_rate = crit_rate self.ailment_chance", "self.language = MinimalResource(**language) self.version_group = MinimalResource(**version_group) class MoveMetaData(Resource): ailment: MinimalResource[\"MoveAilment\"] category: MinimalResource[\"MoveCategory\"] min_hits:", "version_group: Dict[str, Any], ) -> None: self.accuracy = accuracy self.effect_chance = effect_chance self.power", "None self.super = ContestComboDetail(**super) if super is not None else None class ContestComboDetail(Resource):", "version_group: MinimalResource[\"VersionGroup\"] def __init__( self, *, accuracy: int, effect_chance: int, power: int, pp:", "healing: int, crit_rate: int, ailment_chance: int, flinch_chance: int, stat_chance: int, ) -> None:", "crit_rate: int ailment_chance: int flinch_chance: int stat_chance: int def __init__( self, *, ailment:", "from aiopoke.objects.utility import ( MachineVersionDetail, Name, NamedResource, VerboseEffect, ) from aiopoke.utils.minimal_resources import MinimalResource,", "int, contest_combos: Dict[str, Any], contest_effect: Dict[str, Any], contest_type: Dict[str, Any], damage_class: Dict[str, Any],", "Url from aiopoke.utils.resource import Resource if TYPE_CHECKING: from aiopoke.objects.resources import ( ContestEffect, ContestType,", "int, crit_rate: int, ailment_chance: int, flinch_chance: int, stat_chance: int, ) -> None: self.ailment", "aiopoke.objects.resources.contests.super_contest_effect import SuperContestEffect from 
aiopoke.objects.resources.pokemon.ability import AbilityEffectChange from aiopoke.objects.utility import ( MachineVersionDetail, Name,", "ContestComboSets(**contest_combos) self.contest_effect = Url(**contest_effect) self.contest_type = MinimalResource(**contest_type) self.damage_class = MinimalResource(**damage_class) self.effect_chance = effect_chance", "= [ MoveStatChange(**stat_change) for stat_change in stat_changes ] self.super_contest_effect = Url(**super_contest_effect) self.target =", "List[\"MoveStatChange\"] super_contest_effect: Url[\"SuperContestEffect\"] target: MinimalResource[\"MoveTarget\"] type: MinimalResource[\"NaturalGiftType\"] def __init__( self, *, id: int,", "-> None: self.flavor_text = flavor_text self.language = MinimalResource(**language) self.version_group = MinimalResource(**version_group) class MoveMetaData(Resource):", "Any], ) -> None: self.change = change self.stat = MinimalResource(**stat) class PastMoveStatValues(Resource): accuracy:", "VersionGroup, ) from aiopoke.objects.resources.moves import ( MoveAilment, MoveCategory, MoveDamageClass, MoveTarget, ) from aiopoke.objects.utility", "[MinimalResource(**move) for move in use_before] if use_before is not None else None )", "if use_after is not None else None ) class MoveFlavorText(Resource): flavor_text: str language:", "is not None else None class ContestComboDetail(Resource): use_before: Optional[List[MinimalResource[\"Move\"]]] use_after: Optional[List[MinimalResource[\"Move\"]]] def __init__(", "MinimalResource(**pokemon) for pokemon in learned_by_pokemon ] self.machines = [MachineVersionDetail(**machine) for machine in machines]", "learned_by_pokemon: List[Dict[str, Any]], machines: List[Dict[str, Any]], meta: Dict[str, Any], names: List[Dict[str, Any]], past_values:", "MinimalResource(**category) self.min_hits = min_hits self.max_hits = max_hits self.min_turns = min_turns self.max_turns = max_turns", "int effect_entries: List[\"VerboseEffect\"] type: 
MinimalResource[\"NaturalGiftType\"] version_group: MinimalResource[\"VersionGroup\"] def __init__( self, *, accuracy: int,", "self.min_turns = min_turns self.max_turns = max_turns self.drain = drain self.healing = healing self.crit_rate", "self.pp = pp self.priority = priority self.stat_changes = [ MoveStatChange(**stat_change) for stat_change in", "from aiopoke.objects.resources.contests.super_contest_effect import SuperContestEffect from aiopoke.objects.resources.pokemon.ability import AbilityEffectChange from aiopoke.objects.utility import ( MachineVersionDetail,", "generation: Dict[str, Any], learned_by_pokemon: List[Dict[str, Any]], machines: List[Dict[str, Any]], meta: Dict[str, Any], names:", "List[Dict[str, Any]], power: int, pp: int, priority: int, stat_changes: List[Dict[str, Any]], super_contest_effect: Dict[str,", "List[MinimalResource[\"Pokemon\"]] machines: List[\"MachineVersionDetail\"] meta: \"MoveMetaData\" names: List[\"Name\"] past_values: List[\"PastMoveStatValues\"] power: int pp: int", "( ContestEffect, ContestType, Generation, NaturalGiftType, Pokemon, Stat, VersionGroup, ) from aiopoke.objects.resources.moves import (", "int, power: int, pp: int, effect_entries: List[Dict[str, Any]], type: Dict[str, Any], version_group: Dict[str,", "move in use_before] if use_before is not None else None ) self.use_after =", "names: List[\"Name\"] past_values: List[\"PastMoveStatValues\"] power: int pp: int priority: int stat_changes: List[\"MoveStatChange\"] super_contest_effect:", "= effect_chance self.effect_entries = [ VerboseEffect(**effect_entry) for effect_entry in effect_entries ] self.effect_changes =", "int ailment_chance: int flinch_chance: int stat_chance: int def __init__( self, *, ailment: Dict[str,", "<gh_stars>1-10 from typing import TYPE_CHECKING, Any, Dict, List, Optional from aiopoke.objects.resources.contests.super_contest_effect import SuperContestEffect", "self.generation = MinimalResource(**generation) self.learned_by_pokemon = [ 
MinimalResource(**pokemon) for pokemon in learned_by_pokemon ] self.machines", ") -> None: self.change = change self.stat = MinimalResource(**stat) class PastMoveStatValues(Resource): accuracy: int", "normal: Optional[\"ContestComboDetail\"] super: Optional[\"ContestComboDetail\"] def __init__( self, *, normal: Optional[Dict[str, Any]], super: Optional[Dict[str,", "MoveDamageClass, MoveTarget, ) from aiopoke.objects.utility import Language class Move(NamedResource): accuracy: int contest_combos: \"ContestComboSets\"", "super: Optional[\"ContestComboDetail\"] def __init__( self, *, normal: Optional[Dict[str, Any]], super: Optional[Dict[str, Any]], )", "learned_by_pokemon ] self.machines = [MachineVersionDetail(**machine) for machine in machines] self.meta = MoveMetaData(**meta) self.names", "int stat_changes: List[\"MoveStatChange\"] super_contest_effect: Url[\"SuperContestEffect\"] target: MinimalResource[\"MoveTarget\"] type: MinimalResource[\"NaturalGiftType\"] def __init__( self, *,", "self.power = power self.pp = pp self.effect_entries = [ VerboseEffect(**effect_entry) for effect_entry in", "power: int, pp: int, priority: int, stat_changes: List[Dict[str, Any]], super_contest_effect: Dict[str, Any], target:", "Any]], meta: Dict[str, Any], names: List[Dict[str, Any]], past_values: List[Dict[str, Any]], power: int, pp:", "self.stat = MinimalResource(**stat) class PastMoveStatValues(Resource): accuracy: int effect_chance: int power: int pp: int", "for effect_change in effect_changes ] self.flavor_text_entries = [ MoveFlavorText(**flavor_text_entry) for flavor_text_entry in flavor_text_entries", "pp self.effect_entries = [ VerboseEffect(**effect_entry) for effect_entry in effect_entries ] self.type = MinimalResource(**type)", "priority: int, stat_changes: List[Dict[str, Any]], super_contest_effect: Dict[str, Any], target: Dict[str, Any], type: Dict[str,", "Dict[str, Any], damage_class: Dict[str, Any], effect_chance: Optional[int], effect_entries: List[Dict[str, 
Any]], effect_changes: List[Dict[str, Any]],", "[ MinimalResource(**pokemon) for pokemon in learned_by_pokemon ] self.machines = [MachineVersionDetail(**machine) for machine in", "stat_changes ] self.super_contest_effect = Url(**super_contest_effect) self.target = MinimalResource(**target) self.type = MinimalResource(**type) class ContestComboSets(Resource):", "= flavor_text self.language = MinimalResource(**language) self.version_group = MinimalResource(**version_group) class MoveMetaData(Resource): ailment: MinimalResource[\"MoveAilment\"] category:", "MinimalResource[\"VersionGroup\"] def __init__( self, *, accuracy: int, effect_chance: int, power: int, pp: int,", "import AbilityEffectChange from aiopoke.objects.utility import ( MachineVersionDetail, Name, NamedResource, VerboseEffect, ) from aiopoke.utils.minimal_resources", "= [ MoveFlavorText(**flavor_text_entry) for flavor_text_entry in flavor_text_entries ] self.generation = MinimalResource(**generation) self.learned_by_pokemon =", "not None else None self.super = ContestComboDetail(**super) if super is not None else", "None ) class MoveFlavorText(Resource): flavor_text: str language: MinimalResource[\"Language\"] version_group: MinimalResource[\"VersionGroup\"] def __init__( self,", "class MoveFlavorText(Resource): flavor_text: str language: MinimalResource[\"Language\"] version_group: MinimalResource[\"VersionGroup\"] def __init__( self, *, flavor_text:", "= [ VerboseEffect(**effect_entry) for effect_entry in effect_entries ] self.effect_changes = [ AbilityEffectChange(**effect_change) for", "List[\"AbilityEffectChange\"] effect_entries: List[\"VerboseEffect\"] flavor_text_entries: List[\"MoveFlavorText\"] generation: MinimalResource[\"Generation\"] learned_by_pokemon: List[MinimalResource[\"Pokemon\"]] machines: List[\"MachineVersionDetail\"] meta: \"MoveMetaData\"", "machines: List[\"MachineVersionDetail\"] meta: \"MoveMetaData\" names: List[\"Name\"] past_values: List[\"PastMoveStatValues\"] power: int 
pp: int priority:", "] self.power = power self.pp = pp self.priority = priority self.stat_changes = [", "int, pp: int, priority: int, stat_changes: List[Dict[str, Any]], super_contest_effect: Dict[str, Any], target: Dict[str,", "Any]], super: Optional[Dict[str, Any]], ) -> None: self.normal = ContestComboDetail(**normal) if normal is", "Dict[str, Any], version_group: Dict[str, Any], ) -> None: self.accuracy = accuracy self.effect_chance =", "healing: int crit_rate: int ailment_chance: int flinch_chance: int stat_chance: int def __init__( self,", "contest_combos: Dict[str, Any], contest_effect: Dict[str, Any], contest_type: Dict[str, Any], damage_class: Dict[str, Any], effect_chance:", "name in names] self.past_values = [ PastMoveStatValues(**past_value) for past_value in past_values ] self.power", "stat_chance: int, ) -> None: self.ailment = MinimalResource(**ailment) self.category = MinimalResource(**category) self.min_hits =", "MoveMetaData(Resource): ailment: MinimalResource[\"MoveAilment\"] category: MinimalResource[\"MoveCategory\"] min_hits: int max_hits: int min_turns: int max_turns: int", "int, flinch_chance: int, stat_chance: int, ) -> None: self.ailment = MinimalResource(**ailment) self.category =", "min_turns: int, max_turns: int, drain: int, healing: int, crit_rate: int, ailment_chance: int, flinch_chance:", "Any], version_group: Dict[str, Any], ) -> None: self.accuracy = accuracy self.effect_chance = effect_chance", "[MachineVersionDetail(**machine) for machine in machines] self.meta = MoveMetaData(**meta) self.names = [Name(**name) for name", "power self.pp = pp self.effect_entries = [ VerboseEffect(**effect_entry) for effect_entry in effect_entries ]", "else None ) self.use_after = ( [MinimalResource(**move) for move in use_after] if use_after", "crit_rate: int, ailment_chance: int, flinch_chance: int, stat_chance: int, ) -> None: self.ailment =", "int crit_rate: int ailment_chance: int flinch_chance: int stat_chance: int def __init__( self, *,", "is 
not None else None ) class MoveFlavorText(Resource): flavor_text: str language: MinimalResource[\"Language\"] version_group:", "Dict[str, Any], ) -> None: self.accuracy = accuracy self.effect_chance = effect_chance self.power =", "Any], category: Dict[str, Any], min_hits: int, max_hits: int, min_turns: int, max_turns: int, drain:", "None: self.flavor_text = flavor_text self.language = MinimalResource(**language) self.version_group = MinimalResource(**version_group) class MoveMetaData(Resource): ailment:", "Stat, VersionGroup, ) from aiopoke.objects.resources.moves import ( MoveAilment, MoveCategory, MoveDamageClass, MoveTarget, ) from", "MinimalResource, Url from aiopoke.utils.resource import Resource if TYPE_CHECKING: from aiopoke.objects.resources import ( ContestEffect,", "__init__( self, *, flavor_text: str, language: Dict[str, Any], version_group: Dict[str, Any], ) ->", "pokemon in learned_by_pokemon ] self.machines = [MachineVersionDetail(**machine) for machine in machines] self.meta =", "List, Optional from aiopoke.objects.resources.contests.super_contest_effect import SuperContestEffect from aiopoke.objects.resources.pokemon.ability import AbilityEffectChange from aiopoke.objects.utility import", "flavor_text_entry in flavor_text_entries ] self.generation = MinimalResource(**generation) self.learned_by_pokemon = [ MinimalResource(**pokemon) for pokemon", "MinimalResource[\"Stat\"] def __init__( self, *, change: int, stat: Dict[str, Any], ) -> None:", "max_turns self.drain = drain self.healing = healing self.crit_rate = crit_rate self.ailment_chance = ailment_chance", "def __init__( self, *, normal: Optional[Dict[str, Any]], super: Optional[Dict[str, Any]], ) -> None:", "in stat_changes ] self.super_contest_effect = Url(**super_contest_effect) self.target = MinimalResource(**target) self.type = MinimalResource(**type) class", "MinimalResource(**target) self.type = MinimalResource(**type) class ContestComboSets(Resource): normal: 
Optional[\"ContestComboDetail\"] super: Optional[\"ContestComboDetail\"] def __init__( self,", "= Url(**contest_effect) self.contest_type = MinimalResource(**contest_type) self.damage_class = MinimalResource(**damage_class) self.effect_chance = effect_chance self.effect_entries =", "__init__( self, *, ailment: Dict[str, Any], category: Dict[str, Any], min_hits: int, max_hits: int,", "int effect_chance: int power: int pp: int effect_entries: List[\"VerboseEffect\"] type: MinimalResource[\"NaturalGiftType\"] version_group: MinimalResource[\"VersionGroup\"]", "*, id: int, name: str, accuracy: int, contest_combos: Dict[str, Any], contest_effect: Dict[str, Any],", "self, *, id: int, name: str, accuracy: int, contest_combos: Dict[str, Any], contest_effect: Dict[str,", "flavor_text_entries: List[Dict[str, Any]], generation: Dict[str, Any], learned_by_pokemon: List[Dict[str, Any]], machines: List[Dict[str, Any]], meta:", "use_after: Optional[List[Dict[str, Any]]], ) -> None: self.use_before = ( [MinimalResource(**move) for move in", "in use_before] if use_before is not None else None ) self.use_after = (", "MinimalResource(**contest_type) self.damage_class = MinimalResource(**damage_class) self.effect_chance = effect_chance self.effect_entries = [ VerboseEffect(**effect_entry) for effect_entry", "Optional[List[MinimalResource[\"Move\"]]] use_after: Optional[List[MinimalResource[\"Move\"]]] def __init__( self, *, use_before: Optional[List[Dict[str, Any]]], use_after: Optional[List[Dict[str, Any]]],", "language: Dict[str, Any], version_group: Dict[str, Any], ) -> None: self.flavor_text = flavor_text self.language", "pp: int, priority: int, stat_changes: List[Dict[str, Any]], super_contest_effect: Dict[str, Any], target: Dict[str, Any],", "effect_entries ] self.effect_changes = [ AbilityEffectChange(**effect_change) for effect_change in effect_changes ] self.flavor_text_entries =", "Any], ) -> None: super().__init__(id=id, name=name) self.accuracy = accuracy 
self.contest_combos = ContestComboSets(**contest_combos) self.contest_effect", "self.drain = drain self.healing = healing self.crit_rate = crit_rate self.ailment_chance = ailment_chance self.flinch_chance", "normal is not None else None self.super = ContestComboDetail(**super) if super is not", "= MinimalResource(**generation) self.learned_by_pokemon = [ MinimalResource(**pokemon) for pokemon in learned_by_pokemon ] self.machines =", "ContestComboSets(Resource): normal: Optional[\"ContestComboDetail\"] super: Optional[\"ContestComboDetail\"] def __init__( self, *, normal: Optional[Dict[str, Any]], super:", "stat_changes: List[Dict[str, Any]], super_contest_effect: Dict[str, Any], target: Dict[str, Any], type: Dict[str, Any], )", "effect_chance: int, power: int, pp: int, effect_entries: List[Dict[str, Any]], type: Dict[str, Any], version_group:", "flavor_text: str language: MinimalResource[\"Language\"] version_group: MinimalResource[\"VersionGroup\"] def __init__( self, *, flavor_text: str, language:", "int stat_chance: int def __init__( self, *, ailment: Dict[str, Any], category: Dict[str, Any],", "List[Dict[str, Any]], type: Dict[str, Any], version_group: Dict[str, Any], ) -> None: self.accuracy =", "*, normal: Optional[Dict[str, Any]], super: Optional[Dict[str, Any]], ) -> None: self.normal = ContestComboDetail(**normal)", "Dict[str, Any], target: Dict[str, Any], type: Dict[str, Any], ) -> None: super().__init__(id=id, name=name)", "Url(**contest_effect) self.contest_type = MinimalResource(**contest_type) self.damage_class = MinimalResource(**damage_class) self.effect_chance = effect_chance self.effect_entries = [", "change self.stat = MinimalResource(**stat) class PastMoveStatValues(Resource): accuracy: int effect_chance: int power: int pp:", "= [MachineVersionDetail(**machine) for machine in machines] self.meta = MoveMetaData(**meta) self.names = [Name(**name) for", "NamedResource, VerboseEffect, ) from aiopoke.utils.minimal_resources import MinimalResource, 
Url from aiopoke.utils.resource import Resource if", "use_after] if use_after is not None else None ) class MoveFlavorText(Resource): flavor_text: str", "for machine in machines] self.meta = MoveMetaData(**meta) self.names = [Name(**name) for name in", "accuracy self.effect_chance = effect_chance self.power = power self.pp = pp self.effect_entries = [", "[ PastMoveStatValues(**past_value) for past_value in past_values ] self.power = power self.pp = pp", "Any]]], ) -> None: self.use_before = ( [MinimalResource(**move) for move in use_before] if", ") -> None: self.use_before = ( [MinimalResource(**move) for move in use_before] if use_before", "= MinimalResource(**target) self.type = MinimalResource(**type) class ContestComboSets(Resource): normal: Optional[\"ContestComboDetail\"] super: Optional[\"ContestComboDetail\"] def __init__(", "past_values: List[\"PastMoveStatValues\"] power: int pp: int priority: int stat_changes: List[\"MoveStatChange\"] super_contest_effect: Url[\"SuperContestEffect\"] target:", "Dict[str, Any], ) -> None: self.flavor_text = flavor_text self.language = MinimalResource(**language) self.version_group =", "= MoveMetaData(**meta) self.names = [Name(**name) for name in names] self.past_values = [ PastMoveStatValues(**past_value)", "language: MinimalResource[\"Language\"] version_group: MinimalResource[\"VersionGroup\"] def __init__( self, *, flavor_text: str, language: Dict[str, Any],", "target: MinimalResource[\"MoveTarget\"] type: MinimalResource[\"NaturalGiftType\"] def __init__( self, *, id: int, name: str, accuracy:", "version_group: Dict[str, Any], ) -> None: self.flavor_text = flavor_text self.language = MinimalResource(**language) self.version_group", "Any], ) -> None: self.flavor_text = flavor_text self.language = MinimalResource(**language) self.version_group = MinimalResource(**version_group)", "= MinimalResource(**stat) class PastMoveStatValues(Resource): accuracy: int effect_chance: int power: int pp: int effect_entries:", 
"effect_entries: List[\"VerboseEffect\"] flavor_text_entries: List[\"MoveFlavorText\"] generation: MinimalResource[\"Generation\"] learned_by_pokemon: List[MinimalResource[\"Pokemon\"]] machines: List[\"MachineVersionDetail\"] meta: \"MoveMetaData\" names:", "= ContestComboDetail(**super) if super is not None else None class ContestComboDetail(Resource): use_before: Optional[List[MinimalResource[\"Move\"]]]", "Optional[\"ContestComboDetail\"] super: Optional[\"ContestComboDetail\"] def __init__( self, *, normal: Optional[Dict[str, Any]], super: Optional[Dict[str, Any]],", "int, stat: Dict[str, Any], ) -> None: self.change = change self.stat = MinimalResource(**stat)", "self.learned_by_pokemon = [ MinimalResource(**pokemon) for pokemon in learned_by_pokemon ] self.machines = [MachineVersionDetail(**machine) for", "self.crit_rate = crit_rate self.ailment_chance = ailment_chance self.flinch_chance = flinch_chance self.stat_chance = stat_chance class", "self.effect_changes = [ AbilityEffectChange(**effect_change) for effect_change in effect_changes ] self.flavor_text_entries = [ MoveFlavorText(**flavor_text_entry)", "aiopoke.objects.resources.moves import ( MoveAilment, MoveCategory, MoveDamageClass, MoveTarget, ) from aiopoke.objects.utility import Language class", "List[\"VerboseEffect\"] type: MinimalResource[\"NaturalGiftType\"] version_group: MinimalResource[\"VersionGroup\"] def __init__( self, *, accuracy: int, effect_chance: int,", "Any]], generation: Dict[str, Any], learned_by_pokemon: List[Dict[str, Any]], machines: List[Dict[str, Any]], meta: Dict[str, Any],", "None ) self.use_after = ( [MinimalResource(**move) for move in use_after] if use_after is", "aiopoke.utils.resource import Resource if TYPE_CHECKING: from aiopoke.objects.resources import ( ContestEffect, ContestType, Generation, NaturalGiftType,", "Any], effect_chance: Optional[int], effect_entries: List[Dict[str, Any]], effect_changes: List[Dict[str, Any]], flavor_text_entries: List[Dict[str, Any]], 
generation:", "category: Dict[str, Any], min_hits: int, max_hits: int, min_turns: int, max_turns: int, drain: int,", "= MinimalResource(**ailment) self.category = MinimalResource(**category) self.min_hits = min_hits self.max_hits = max_hits self.min_turns =", "self.names = [Name(**name) for name in names] self.past_values = [ PastMoveStatValues(**past_value) for past_value", "accuracy self.contest_combos = ContestComboSets(**contest_combos) self.contest_effect = Url(**contest_effect) self.contest_type = MinimalResource(**contest_type) self.damage_class = MinimalResource(**damage_class)", "*, use_before: Optional[List[Dict[str, Any]]], use_after: Optional[List[Dict[str, Any]]], ) -> None: self.use_before = (", "= [Name(**name) for name in names] self.past_values = [ PastMoveStatValues(**past_value) for past_value in", "is not None else None ) self.use_after = ( [MinimalResource(**move) for move in", "List[Dict[str, Any]], machines: List[Dict[str, Any]], meta: Dict[str, Any], names: List[Dict[str, Any]], past_values: List[Dict[str,", ") from aiopoke.utils.minimal_resources import MinimalResource, Url from aiopoke.utils.resource import Resource if TYPE_CHECKING: from", "use_after: Optional[List[MinimalResource[\"Move\"]]] def __init__( self, *, use_before: Optional[List[Dict[str, Any]]], use_after: Optional[List[Dict[str, Any]]], )", "self, *, flavor_text: str, language: Dict[str, Any], version_group: Dict[str, Any], ) -> None:", "Dict[str, Any], learned_by_pokemon: List[Dict[str, Any]], machines: List[Dict[str, Any]], meta: Dict[str, Any], names: List[Dict[str,", "Generation, NaturalGiftType, Pokemon, Stat, VersionGroup, ) from aiopoke.objects.resources.moves import ( MoveAilment, MoveCategory, MoveDamageClass,", "= priority self.stat_changes = [ MoveStatChange(**stat_change) for stat_change in stat_changes ] self.super_contest_effect =", "None else None ) self.use_after = ( [MinimalResource(**move) for move in use_after] if", "-> None: self.ailment = 
MinimalResource(**ailment) self.category = MinimalResource(**category) self.min_hits = min_hits self.max_hits =", "( MachineVersionDetail, Name, NamedResource, VerboseEffect, ) from aiopoke.utils.minimal_resources import MinimalResource, Url from aiopoke.utils.resource", "if use_before is not None else None ) self.use_after = ( [MinimalResource(**move) for", "] self.machines = [MachineVersionDetail(**machine) for machine in machines] self.meta = MoveMetaData(**meta) self.names =", "str, accuracy: int, contest_combos: Dict[str, Any], contest_effect: Dict[str, Any], contest_type: Dict[str, Any], damage_class:", ") -> None: super().__init__(id=id, name=name) self.accuracy = accuracy self.contest_combos = ContestComboSets(**contest_combos) self.contest_effect =", "pp: int, effect_entries: List[Dict[str, Any]], type: Dict[str, Any], version_group: Dict[str, Any], ) ->", "Dict[str, Any], names: List[Dict[str, Any]], past_values: List[Dict[str, Any]], power: int, pp: int, priority:", "max_hits self.min_turns = min_turns self.max_turns = max_turns self.drain = drain self.healing = healing", "List[\"MachineVersionDetail\"] meta: \"MoveMetaData\" names: List[\"Name\"] past_values: List[\"PastMoveStatValues\"] power: int pp: int priority: int", "Any]], type: Dict[str, Any], version_group: Dict[str, Any], ) -> None: self.accuracy = accuracy", "ailment: Dict[str, Any], category: Dict[str, Any], min_hits: int, max_hits: int, min_turns: int, max_turns:", "accuracy: int effect_chance: int power: int pp: int effect_entries: List[\"VerboseEffect\"] type: MinimalResource[\"NaturalGiftType\"] version_group:", "Dict[str, Any], contest_effect: Dict[str, Any], contest_type: Dict[str, Any], damage_class: Dict[str, Any], effect_chance: Optional[int],", ") -> None: self.flavor_text = flavor_text self.language = MinimalResource(**language) self.version_group = MinimalResource(**version_group) class", "min_hits: int, max_hits: int, min_turns: int, max_turns: int, drain: int, healing: int, 
crit_rate:", "self.max_hits = max_hits self.min_turns = min_turns self.max_turns = max_turns self.drain = drain self.healing", "self.effect_chance = effect_chance self.power = power self.pp = pp self.effect_entries = [ VerboseEffect(**effect_entry)", "int healing: int crit_rate: int ailment_chance: int flinch_chance: int stat_chance: int def __init__(", "int max_turns: int drain: int healing: int crit_rate: int ailment_chance: int flinch_chance: int", "change: int stat: MinimalResource[\"Stat\"] def __init__( self, *, change: int, stat: Dict[str, Any],", "None else None ) class MoveFlavorText(Resource): flavor_text: str language: MinimalResource[\"Language\"] version_group: MinimalResource[\"VersionGroup\"] def", "super_contest_effect: Dict[str, Any], target: Dict[str, Any], type: Dict[str, Any], ) -> None: super().__init__(id=id,", "PastMoveStatValues(Resource): accuracy: int effect_chance: int power: int pp: int effect_entries: List[\"VerboseEffect\"] type: MinimalResource[\"NaturalGiftType\"]", "\"ContestComboSets\" contest_effect: Url[\"ContestEffect\"] contest_type: MinimalResource[\"ContestType\"] damage_class: MinimalResource[\"MoveDamageClass\"] effect_chance: Optional[int] effect_changes: List[\"AbilityEffectChange\"] effect_entries: List[\"VerboseEffect\"]", "AbilityEffectChange(**effect_change) for effect_change in effect_changes ] self.flavor_text_entries = [ MoveFlavorText(**flavor_text_entry) for flavor_text_entry in", "-> None: self.normal = ContestComboDetail(**normal) if normal is not None else None self.super", "MinimalResource[\"Language\"] version_group: MinimalResource[\"VersionGroup\"] def __init__( self, *, flavor_text: str, language: Dict[str, Any], version_group:", "*, flavor_text: str, language: Dict[str, Any], version_group: Dict[str, Any], ) -> None: self.flavor_text", "MinimalResource[\"MoveAilment\"] category: MinimalResource[\"MoveCategory\"] min_hits: int max_hits: int min_turns: int max_turns: int drain: int", "class 
PastMoveStatValues(Resource): accuracy: int effect_chance: int power: int pp: int effect_entries: List[\"VerboseEffect\"] type:", "use_after is not None else None ) class MoveFlavorText(Resource): flavor_text: str language: MinimalResource[\"Language\"]", "import TYPE_CHECKING, Any, Dict, List, Optional from aiopoke.objects.resources.contests.super_contest_effect import SuperContestEffect from aiopoke.objects.resources.pokemon.ability import", "stat_change in stat_changes ] self.super_contest_effect = Url(**super_contest_effect) self.target = MinimalResource(**target) self.type = MinimalResource(**type)", "MinimalResource(**type) class ContestComboSets(Resource): normal: Optional[\"ContestComboDetail\"] super: Optional[\"ContestComboDetail\"] def __init__( self, *, normal: Optional[Dict[str,", "effect_chance self.power = power self.pp = pp self.effect_entries = [ VerboseEffect(**effect_entry) for effect_entry", "effect_chance: int power: int pp: int effect_entries: List[\"VerboseEffect\"] type: MinimalResource[\"NaturalGiftType\"] version_group: MinimalResource[\"VersionGroup\"] def", "power self.pp = pp self.priority = priority self.stat_changes = [ MoveStatChange(**stat_change) for stat_change", "flavor_text self.language = MinimalResource(**language) self.version_group = MinimalResource(**version_group) class MoveMetaData(Resource): ailment: MinimalResource[\"MoveAilment\"] category: MinimalResource[\"MoveCategory\"]", "past_values: List[Dict[str, Any]], power: int, pp: int, priority: int, stat_changes: List[Dict[str, Any]], super_contest_effect:", "type: Dict[str, Any], ) -> None: super().__init__(id=id, name=name) self.accuracy = accuracy self.contest_combos =", "priority self.stat_changes = [ MoveStatChange(**stat_change) for stat_change in stat_changes ] self.super_contest_effect = Url(**super_contest_effect)", "self.min_hits = min_hits self.max_hits = max_hits self.min_turns = min_turns self.max_turns = max_turns self.drain", "MinimalResource(**language) 
self.version_group = MinimalResource(**version_group) class MoveMetaData(Resource): ailment: MinimalResource[\"MoveAilment\"] category: MinimalResource[\"MoveCategory\"] min_hits: int max_hits:", "Any], learned_by_pokemon: List[Dict[str, Any]], machines: List[Dict[str, Any]], meta: Dict[str, Any], names: List[Dict[str, Any]],", "from aiopoke.utils.minimal_resources import MinimalResource, Url from aiopoke.utils.resource import Resource if TYPE_CHECKING: from aiopoke.objects.resources", "MinimalResource[\"ContestType\"] damage_class: MinimalResource[\"MoveDamageClass\"] effect_chance: Optional[int] effect_changes: List[\"AbilityEffectChange\"] effect_entries: List[\"VerboseEffect\"] flavor_text_entries: List[\"MoveFlavorText\"] generation: MinimalResource[\"Generation\"]", "contest_type: Dict[str, Any], damage_class: Dict[str, Any], effect_chance: Optional[int], effect_entries: List[Dict[str, Any]], effect_changes: List[Dict[str,", "int power: int pp: int effect_entries: List[\"VerboseEffect\"] type: MinimalResource[\"NaturalGiftType\"] version_group: MinimalResource[\"VersionGroup\"] def __init__(", "not None else None class ContestComboDetail(Resource): use_before: Optional[List[MinimalResource[\"Move\"]]] use_after: Optional[List[MinimalResource[\"Move\"]]] def __init__( self,", "self.ailment = MinimalResource(**ailment) self.category = MinimalResource(**category) self.min_hits = min_hits self.max_hits = max_hits self.min_turns", "accuracy: int contest_combos: \"ContestComboSets\" contest_effect: Url[\"ContestEffect\"] contest_type: MinimalResource[\"ContestType\"] damage_class: MinimalResource[\"MoveDamageClass\"] effect_chance: Optional[int] effect_changes:", "int, effect_chance: int, power: int, pp: int, effect_entries: List[Dict[str, Any]], type: Dict[str, Any],", "[ VerboseEffect(**effect_entry) for effect_entry in effect_entries ] self.type = MinimalResource(**type) self.version_group = MinimalResource(**version_group)", "for pokemon in learned_by_pokemon ] 
self.machines = [MachineVersionDetail(**machine) for machine in machines] self.meta", "Any]], flavor_text_entries: List[Dict[str, Any]], generation: Dict[str, Any], learned_by_pokemon: List[Dict[str, Any]], machines: List[Dict[str, Any]],", "= accuracy self.effect_chance = effect_chance self.power = power self.pp = pp self.effect_entries =", "int, healing: int, crit_rate: int, ailment_chance: int, flinch_chance: int, stat_chance: int, ) ->", "[Name(**name) for name in names] self.past_values = [ PastMoveStatValues(**past_value) for past_value in past_values", "names] self.past_values = [ PastMoveStatValues(**past_value) for past_value in past_values ] self.power = power", "( [MinimalResource(**move) for move in use_before] if use_before is not None else None", "Any]], super_contest_effect: Dict[str, Any], target: Dict[str, Any], type: Dict[str, Any], ) -> None:", "self.super_contest_effect = Url(**super_contest_effect) self.target = MinimalResource(**target) self.type = MinimalResource(**type) class ContestComboSets(Resource): normal: Optional[\"ContestComboDetail\"]", "__init__( self, *, use_before: Optional[List[Dict[str, Any]]], use_after: Optional[List[Dict[str, Any]]], ) -> None: self.use_before", "effect_changes: List[\"AbilityEffectChange\"] effect_entries: List[\"VerboseEffect\"] flavor_text_entries: List[\"MoveFlavorText\"] generation: MinimalResource[\"Generation\"] learned_by_pokemon: List[MinimalResource[\"Pokemon\"]] machines: List[\"MachineVersionDetail\"] meta:", "Any], contest_type: Dict[str, Any], damage_class: Dict[str, Any], effect_chance: Optional[int], effect_entries: List[Dict[str, Any]], effect_changes:", ") -> None: self.normal = ContestComboDetail(**normal) if normal is not None else None", "= stat_chance class MoveStatChange(Resource): change: int stat: MinimalResource[\"Stat\"] def __init__( self, *, change:", "effect_chance: Optional[int] effect_changes: List[\"AbilityEffectChange\"] effect_entries: List[\"VerboseEffect\"] 
flavor_text_entries: List[\"MoveFlavorText\"] generation: MinimalResource[\"Generation\"] learned_by_pokemon: List[MinimalResource[\"Pokemon\"]] machines:", "contest_effect: Dict[str, Any], contest_type: Dict[str, Any], damage_class: Dict[str, Any], effect_chance: Optional[int], effect_entries: List[Dict[str,", "int drain: int healing: int crit_rate: int ailment_chance: int flinch_chance: int stat_chance: int", "in learned_by_pokemon ] self.machines = [MachineVersionDetail(**machine) for machine in machines] self.meta = MoveMetaData(**meta)", "drain: int healing: int crit_rate: int ailment_chance: int flinch_chance: int stat_chance: int def", "= MinimalResource(**type) class ContestComboSets(Resource): normal: Optional[\"ContestComboDetail\"] super: Optional[\"ContestComboDetail\"] def __init__( self, *, normal:", "Optional[List[Dict[str, Any]]], ) -> None: self.use_before = ( [MinimalResource(**move) for move in use_before]", "List[\"PastMoveStatValues\"] power: int pp: int priority: int stat_changes: List[\"MoveStatChange\"] super_contest_effect: Url[\"SuperContestEffect\"] target: MinimalResource[\"MoveTarget\"]", "for name in names] self.past_values = [ PastMoveStatValues(**past_value) for past_value in past_values ]", "= [ VerboseEffect(**effect_entry) for effect_entry in effect_entries ] self.type = MinimalResource(**type) self.version_group =", "List[Dict[str, Any]], generation: Dict[str, Any], learned_by_pokemon: List[Dict[str, Any]], machines: List[Dict[str, Any]], meta: Dict[str,", "self.damage_class = MinimalResource(**damage_class) self.effect_chance = effect_chance self.effect_entries = [ VerboseEffect(**effect_entry) for effect_entry in", "[MinimalResource(**move) for move in use_after] if use_after is not None else None )", "self.use_after = ( [MinimalResource(**move) for move in use_after] if use_after is not None", "= effect_chance self.power = power self.pp = pp self.effect_entries = [ VerboseEffect(**effect_entry) for", "= healing self.crit_rate 
= crit_rate self.ailment_chance = ailment_chance self.flinch_chance = flinch_chance self.stat_chance =", "def __init__( self, *, use_before: Optional[List[Dict[str, Any]]], use_after: Optional[List[Dict[str, Any]]], ) -> None:", "int, max_turns: int, drain: int, healing: int, crit_rate: int, ailment_chance: int, flinch_chance: int,", "priority: int stat_changes: List[\"MoveStatChange\"] super_contest_effect: Url[\"SuperContestEffect\"] target: MinimalResource[\"MoveTarget\"] type: MinimalResource[\"NaturalGiftType\"] def __init__( self,", "[ AbilityEffectChange(**effect_change) for effect_change in effect_changes ] self.flavor_text_entries = [ MoveFlavorText(**flavor_text_entry) for flavor_text_entry", "Optional[int] effect_changes: List[\"AbilityEffectChange\"] effect_entries: List[\"VerboseEffect\"] flavor_text_entries: List[\"MoveFlavorText\"] generation: MinimalResource[\"Generation\"] learned_by_pokemon: List[MinimalResource[\"Pokemon\"]] machines: List[\"MachineVersionDetail\"]", "int, stat_changes: List[Dict[str, Any]], super_contest_effect: Dict[str, Any], target: Dict[str, Any], type: Dict[str, Any],", "int contest_combos: \"ContestComboSets\" contest_effect: Url[\"ContestEffect\"] contest_type: MinimalResource[\"ContestType\"] damage_class: MinimalResource[\"MoveDamageClass\"] effect_chance: Optional[int] effect_changes: List[\"AbilityEffectChange\"]", "ContestEffect, ContestType, Generation, NaturalGiftType, Pokemon, Stat, VersionGroup, ) from aiopoke.objects.resources.moves import ( MoveAilment,", "min_hits self.max_hits = max_hits self.min_turns = min_turns self.max_turns = max_turns self.drain = drain", "else None self.super = ContestComboDetail(**super) if super is not None else None class", "MoveStatChange(**stat_change) for stat_change in stat_changes ] self.super_contest_effect = Url(**super_contest_effect) self.target = MinimalResource(**target) self.type", "MoveStatChange(Resource): change: int stat: MinimalResource[\"Stat\"] def __init__( self, 
*, change: int, stat: Dict[str,", "self.meta = MoveMetaData(**meta) self.names = [Name(**name) for name in names] self.past_values = [", "ailment_chance: int flinch_chance: int stat_chance: int def __init__( self, *, ailment: Dict[str, Any]," ]
[ "spam def TestSystem(): r = spam.system(\"ls -l\") assert r == 0 def TestNothingDone():", "r = spam.system(\"ls -l\") assert r == 0 def TestNothingDone(): r = spam.nothing_done()", "= spam.system(\"ls -l\") assert r == 0 def TestNothingDone(): r = spam.nothing_done() assert", "TestSystem(): r = spam.system(\"ls -l\") assert r == 0 def TestNothingDone(): r =", "-l\") assert r == 0 def TestNothingDone(): r = spam.nothing_done() assert r is", "spam.system(\"ls -l\") assert r == 0 def TestNothingDone(): r = spam.nothing_done() assert r", "import spam def TestSystem(): r = spam.system(\"ls -l\") assert r == 0 def", "assert r == 0 def TestNothingDone(): r = spam.nothing_done() assert r is None", "def TestSystem(): r = spam.system(\"ls -l\") assert r == 0 def TestNothingDone(): r" ]
[ "content of Makefile\"\"\" full_path = os.path.join(repo_path, \"Makefile\") return get_file_content(full_path) @add_key_to_metadata((module_dict_key, \"upgrade\")) def check_has_upgrade(makefile,", "\"\"\" regex_pattern = \"upgrade:\" match = re.search(regex_pattern, makefile) all_results[module_dict_key][\"upgrade\"] = False if match", "makefile target that upgrades our dependencies to newer released versions \"\"\" regex_pattern =", "of Makefile\"\"\" full_path = os.path.join(repo_path, \"Makefile\") return get_file_content(full_path) @add_key_to_metadata((module_dict_key, \"upgrade\")) def check_has_upgrade(makefile, all_results):", "from pytest_repo_health import add_key_to_metadata from repo_health import get_file_content module_dict_key = \"makefile\" @pytest.fixture(name='makefile') def", "upgrade: makefile target that upgrades our dependencies to newer released versions \"\"\" regex_pattern", "= \"makefile\" @pytest.fixture(name='makefile') def fixture_makefile(repo_path): \"\"\"Fixture containing the text content of Makefile\"\"\" full_path", "\"upgrade:\" match = re.search(regex_pattern, makefile) all_results[module_dict_key][\"upgrade\"] = False if match is not None:", "Makefile\"\"\" full_path = os.path.join(repo_path, \"Makefile\") return get_file_content(full_path) @add_key_to_metadata((module_dict_key, \"upgrade\")) def check_has_upgrade(makefile, all_results): \"\"\"", "Makefile follows standards \"\"\" import re import os import pytest from pytest_repo_health import", "import re import os import pytest from pytest_repo_health import add_key_to_metadata from repo_health import", "\"\"\" import re import os import pytest from pytest_repo_health import add_key_to_metadata from repo_health", "add_key_to_metadata from repo_health import get_file_content module_dict_key = \"makefile\" @pytest.fixture(name='makefile') def fixture_makefile(repo_path): \"\"\"Fixture containing", "= os.path.join(repo_path, \"Makefile\") return get_file_content(full_path) 
@add_key_to_metadata((module_dict_key, \"upgrade\")) def check_has_upgrade(makefile, all_results): \"\"\" upgrade: makefile", "\"Makefile\") return get_file_content(full_path) @add_key_to_metadata((module_dict_key, \"upgrade\")) def check_has_upgrade(makefile, all_results): \"\"\" upgrade: makefile target that", "\"\"\" upgrade: makefile target that upgrades our dependencies to newer released versions \"\"\"", "that upgrades our dependencies to newer released versions \"\"\" regex_pattern = \"upgrade:\" match", "if Makefile follows standards \"\"\" import re import os import pytest from pytest_repo_health", "\"upgrade\")) def check_has_upgrade(makefile, all_results): \"\"\" upgrade: makefile target that upgrades our dependencies to", "full_path = os.path.join(repo_path, \"Makefile\") return get_file_content(full_path) @add_key_to_metadata((module_dict_key, \"upgrade\")) def check_has_upgrade(makefile, all_results): \"\"\" upgrade:", "see if Makefile follows standards \"\"\" import re import os import pytest from", "def check_has_upgrade(makefile, all_results): \"\"\" upgrade: makefile target that upgrades our dependencies to newer", "@pytest.fixture(name='makefile') def fixture_makefile(repo_path): \"\"\"Fixture containing the text content of Makefile\"\"\" full_path = os.path.join(repo_path,", "return get_file_content(full_path) @add_key_to_metadata((module_dict_key, \"upgrade\")) def check_has_upgrade(makefile, all_results): \"\"\" upgrade: makefile target that upgrades", "upgrades our dependencies to newer released versions \"\"\" regex_pattern = \"upgrade:\" match =", "get_file_content(full_path) @add_key_to_metadata((module_dict_key, \"upgrade\")) def check_has_upgrade(makefile, all_results): \"\"\" upgrade: makefile target that upgrades our", "check_has_upgrade(makefile, all_results): \"\"\" upgrade: makefile target that upgrades our dependencies to newer released", "standards \"\"\" import re import os import pytest from pytest_repo_health import 
add_key_to_metadata from", "import add_key_to_metadata from repo_health import get_file_content module_dict_key = \"makefile\" @pytest.fixture(name='makefile') def fixture_makefile(repo_path): \"\"\"Fixture", "the text content of Makefile\"\"\" full_path = os.path.join(repo_path, \"Makefile\") return get_file_content(full_path) @add_key_to_metadata((module_dict_key, \"upgrade\"))", "regex_pattern = \"upgrade:\" match = re.search(regex_pattern, makefile) all_results[module_dict_key][\"upgrade\"] = False if match is", "import os import pytest from pytest_repo_health import add_key_to_metadata from repo_health import get_file_content module_dict_key", "os import pytest from pytest_repo_health import add_key_to_metadata from repo_health import get_file_content module_dict_key =", "\"makefile\" @pytest.fixture(name='makefile') def fixture_makefile(repo_path): \"\"\"Fixture containing the text content of Makefile\"\"\" full_path =", "\"\"\"Fixture containing the text content of Makefile\"\"\" full_path = os.path.join(repo_path, \"Makefile\") return get_file_content(full_path)", "containing the text content of Makefile\"\"\" full_path = os.path.join(repo_path, \"Makefile\") return get_file_content(full_path) @add_key_to_metadata((module_dict_key,", "Checks to see if Makefile follows standards \"\"\" import re import os import", "follows standards \"\"\" import re import os import pytest from pytest_repo_health import add_key_to_metadata", "fixture_makefile(repo_path): \"\"\"Fixture containing the text content of Makefile\"\"\" full_path = os.path.join(repo_path, \"Makefile\") return", "text content of Makefile\"\"\" full_path = os.path.join(repo_path, \"Makefile\") return get_file_content(full_path) @add_key_to_metadata((module_dict_key, \"upgrade\")) def", "dependencies to newer released versions \"\"\" regex_pattern = \"upgrade:\" match = re.search(regex_pattern, makefile)", "\"\"\" Checks to see if Makefile follows standards \"\"\" import re import os", "released 
versions \"\"\" regex_pattern = \"upgrade:\" match = re.search(regex_pattern, makefile) all_results[module_dict_key][\"upgrade\"] = False", "re.search(regex_pattern, makefile) all_results[module_dict_key][\"upgrade\"] = False if match is not None: all_results[module_dict_key][\"upgrade\"] = True", "= re.search(regex_pattern, makefile) all_results[module_dict_key][\"upgrade\"] = False if match is not None: all_results[module_dict_key][\"upgrade\"] =", "from repo_health import get_file_content module_dict_key = \"makefile\" @pytest.fixture(name='makefile') def fixture_makefile(repo_path): \"\"\"Fixture containing the", "get_file_content module_dict_key = \"makefile\" @pytest.fixture(name='makefile') def fixture_makefile(repo_path): \"\"\"Fixture containing the text content of", "module_dict_key = \"makefile\" @pytest.fixture(name='makefile') def fixture_makefile(repo_path): \"\"\"Fixture containing the text content of Makefile\"\"\"", "to see if Makefile follows standards \"\"\" import re import os import pytest", "to newer released versions \"\"\" regex_pattern = \"upgrade:\" match = re.search(regex_pattern, makefile) all_results[module_dict_key][\"upgrade\"]", "match = re.search(regex_pattern, makefile) all_results[module_dict_key][\"upgrade\"] = False if match is not None: all_results[module_dict_key][\"upgrade\"]", "our dependencies to newer released versions \"\"\" regex_pattern = \"upgrade:\" match = re.search(regex_pattern,", "repo_health import get_file_content module_dict_key = \"makefile\" @pytest.fixture(name='makefile') def fixture_makefile(repo_path): \"\"\"Fixture containing the text", "newer released versions \"\"\" regex_pattern = \"upgrade:\" match = re.search(regex_pattern, makefile) all_results[module_dict_key][\"upgrade\"] =", "versions \"\"\" regex_pattern = \"upgrade:\" match = re.search(regex_pattern, makefile) all_results[module_dict_key][\"upgrade\"] = False if", "import pytest from pytest_repo_health import add_key_to_metadata from 
repo_health import get_file_content module_dict_key = \"makefile\"", "os.path.join(repo_path, \"Makefile\") return get_file_content(full_path) @add_key_to_metadata((module_dict_key, \"upgrade\")) def check_has_upgrade(makefile, all_results): \"\"\" upgrade: makefile target", "import get_file_content module_dict_key = \"makefile\" @pytest.fixture(name='makefile') def fixture_makefile(repo_path): \"\"\"Fixture containing the text content", "pytest from pytest_repo_health import add_key_to_metadata from repo_health import get_file_content module_dict_key = \"makefile\" @pytest.fixture(name='makefile')", "target that upgrades our dependencies to newer released versions \"\"\" regex_pattern = \"upgrade:\"", "pytest_repo_health import add_key_to_metadata from repo_health import get_file_content module_dict_key = \"makefile\" @pytest.fixture(name='makefile') def fixture_makefile(repo_path):", "@add_key_to_metadata((module_dict_key, \"upgrade\")) def check_has_upgrade(makefile, all_results): \"\"\" upgrade: makefile target that upgrades our dependencies", "all_results): \"\"\" upgrade: makefile target that upgrades our dependencies to newer released versions", "def fixture_makefile(repo_path): \"\"\"Fixture containing the text content of Makefile\"\"\" full_path = os.path.join(repo_path, \"Makefile\")", "re import os import pytest from pytest_repo_health import add_key_to_metadata from repo_health import get_file_content", "= \"upgrade:\" match = re.search(regex_pattern, makefile) all_results[module_dict_key][\"upgrade\"] = False if match is not" ]
[ "('infra', '0002_keymodel_region'), ] operations = [ migrations.CreateModel( name='InstanceModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "[ ('infra', '0002_keymodel_region'), ] operations = [ migrations.CreateModel( name='InstanceModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('infra', '0002_keymodel_region'), ]", "= [ ('infra', '0002_keymodel_region'), ] operations = [ migrations.CreateModel( name='InstanceModel', fields=[ ('id', models.AutoField(auto_created=True,", "2021-02-11 06:19 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('infra',", "[ migrations.CreateModel( name='InstanceModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('ami', models.CharField(max_length=100)),", "on 2021-02-11 06:19 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "06:19 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('infra', '0002_keymodel_region'),", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('infra', '0002_keymodel_region'), ] operations =", "] operations = [ migrations.CreateModel( name='InstanceModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name',", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('ami', models.CharField(max_length=100)), ('instancetype', models.CharField(max_length=100)), ], ), ]", "operations = [ migrations.CreateModel( name='InstanceModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, 
verbose_name='ID')), ('name', models.CharField(max_length=100)),", "'0002_keymodel_region'), ] operations = [ migrations.CreateModel( name='InstanceModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "migrations, models class Migration(migrations.Migration): dependencies = [ ('infra', '0002_keymodel_region'), ] operations = [", "models class Migration(migrations.Migration): dependencies = [ ('infra', '0002_keymodel_region'), ] operations = [ migrations.CreateModel(", "name='InstanceModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('ami', models.CharField(max_length=100)), ('instancetype', models.CharField(max_length=100)),", "# Generated by Django 3.1.5 on 2021-02-11 06:19 from django.db import migrations, models", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('ami', models.CharField(max_length=100)), ('instancetype', models.CharField(max_length=100)), ],", "Generated by Django 3.1.5 on 2021-02-11 06:19 from django.db import migrations, models class", "Django 3.1.5 on 2021-02-11 06:19 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "<gh_stars>0 # Generated by Django 3.1.5 on 2021-02-11 06:19 from django.db import migrations,", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('infra', '0002_keymodel_region'), ] operations", "Migration(migrations.Migration): dependencies = [ ('infra', '0002_keymodel_region'), ] operations = [ migrations.CreateModel( name='InstanceModel', fields=[", "= [ migrations.CreateModel( name='InstanceModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('ami',", 
"migrations.CreateModel( name='InstanceModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('ami', models.CharField(max_length=100)), ('instancetype',", "by Django 3.1.5 on 2021-02-11 06:19 from django.db import migrations, models class Migration(migrations.Migration):", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('ami', models.CharField(max_length=100)), ('instancetype', models.CharField(max_length=100)), ], ),", "class Migration(migrations.Migration): dependencies = [ ('infra', '0002_keymodel_region'), ] operations = [ migrations.CreateModel( name='InstanceModel',", "dependencies = [ ('infra', '0002_keymodel_region'), ] operations = [ migrations.CreateModel( name='InstanceModel', fields=[ ('id',", "3.1.5 on 2021-02-11 06:19 from django.db import migrations, models class Migration(migrations.Migration): dependencies =" ]
[ "greater than 0, False otherwise :rtype: bool \"\"\" return self.armour > 0 def", "return self.armour > 0 def hasShield(self) -> bool: \"\"\"Test whether this BattleShip has", "per ship for module in bbShip.modules: if isinstance(module, bbCloakModule): self.cloaks += module def", "remaining shield HP. :param int new: Integer new amount of shield HP remaining;", "otherwise :rtype: bool \"\"\" return self.shield > 0 def hasCloaks(self) -> bool: \"\"\"Test", "self.EMPs = [] self.remainingCloak = 0 self.cloaking = False self.EMPCooldown = 0 #", "one cloak module per ship for module in bbShip.modules: if isinstance(module, bbCloakModule): self.cloaks", "this BattleShip's hull HP is greater than 0, False otherwise :rtype: bool \"\"\"", "0 or more. :rtype: int \"\"\" return self.armour def getHull(self) -> int: \"\"\"Get", ":return: True if this BattleShip's armour HP is greater than 0, False otherwise", "hasCloaks(self) -> bool: \"\"\"Test whether this BattleShip has any cloak modules equipped. :return:", ":return: True if this BattleShip has at least one cloak module equipped, False", "has any hull HP remaining. :return: True if this BattleShip's hull HP is", "amount of armour HP remaining; 0 or more. :rtype: int \"\"\" return self.armour", "= new def setArmour(self, new : int): \"\"\"Set this BattleShip's remaining armour HP.", "setShield(self, new : int): \"\"\"Set this BattleShip's remaining shield HP. :param int new:", "import bbCloakModule from ..items import bbShip class BattleShip: \"\"\"A class representing ships participting", "False otherwise :rtype: bool \"\"\" return len(self.cloaks) > 0 def getShield(self) -> int:", "HP is greater than 0, False otherwise :rtype: bool \"\"\" return self.armour >", "0 or more. \"\"\" self.armour = new def setHull(self, new : int): \"\"\"Set", "bool: \"\"\"Test whether this BattleShip has any cloak modules equipped. :return: True if", "hull HP remaining; 0 or more. 
:rtype: int \"\"\" return self.hull def setShield(self,", ":return: True if this BattleShip's shield HP is greater than 0, False otherwise", "BattleShip to inherit stats from \"\"\" self.bbShip = bbShip self.hull = bbShip.armour self.armour", ":rtype: bool \"\"\" return self.shield > 0 def hasCloaks(self) -> bool: \"\"\"Test whether", "True if this BattleShip's hull HP is greater than 0, False otherwise :rtype:", "is greater than 0, False otherwise :rtype: bool \"\"\" return self.shield > 0", "BattleShip's remaining shield HP. :param int new: Integer new amount of shield HP", ":param int new: Integer new amount of armour HP remaining; 0 or more.", "bbShip for this BattleShip to inherit stats from \"\"\" self.bbShip = bbShip self.hull", "HP is greater than 0, False otherwise :rtype: bool \"\"\" return self.hull >", "\"\"\" self.armour = new def setHull(self, new : int): \"\"\"Set this BattleShip's remaining", "hull HP. :return: Integer amount of hull HP remaining; 0 or more. :rtype:", "# TODO: Update to use only one cloak module per ship for module", ":rtype: int \"\"\" return self.shield def getArmour(self) -> int: \"\"\"Get this BattleShip's remaining", "bbShip.getArmour() - self.hull self.shield = bbShip.getShield() self.dps = bbShip.getDPS() self.cloaks = [] self.EMPs", ":rtype: int \"\"\" return self.hull def setShield(self, new : int): \"\"\"Set this BattleShip's", "0 # TODO: Update to use only one cloak module per ship for", "for this BattleShip to inherit stats from \"\"\" self.bbShip = bbShip self.hull =", "shield HP remaining; 0 or more. \"\"\" self.shield = new def setArmour(self, new", "-> bool: \"\"\"Test whether this BattleShip has any armour HP remaining. :return: True", "-> int: \"\"\"Get this BattleShip's remaining shield HP. :return: Integer amount of shield", "0, False otherwise :rtype: bool \"\"\" return self.armour > 0 def hasShield(self) ->", "remaining shield HP. 
:return: Integer amount of shield HP remaining; 0 or more.", "\"\"\" return self.armour def getHull(self) -> int: \"\"\"Get this BattleShip's remaining hull HP.", "+= module def hasHull(self) -> bool: \"\"\"Test whether this BattleShip has any hull", "bbShip.armour self.armour = bbShip.getArmour() - self.hull self.shield = bbShip.getShield() self.dps = bbShip.getDPS() self.cloaks", "armour HP remaining; 0 or more. :rtype: int \"\"\" return self.armour def getHull(self)", "this BattleShip's remaining hull HP. :param int new: Integer new amount of hull", "\"\"\" return self.armour > 0 def hasShield(self) -> bool: \"\"\"Test whether this BattleShip", "= bbShip.getShield() self.dps = bbShip.getDPS() self.cloaks = [] self.EMPs = [] self.remainingCloak =", "= bbShip.armour self.armour = bbShip.getArmour() - self.hull self.shield = bbShip.getShield() self.dps = bbShip.getDPS()", "int \"\"\" return self.shield def getArmour(self) -> int: \"\"\"Get this BattleShip's remaining armour", "this BattleShip's remaining shield HP. :return: Integer amount of shield HP remaining; 0", "self.hull = bbShip.armour self.armour = bbShip.getArmour() - self.hull self.shield = bbShip.getShield() self.dps =", "remaining. :return: True if this BattleShip's shield HP is greater than 0, False", "\"\"\"Test whether this BattleShip has any shield HP remaining. :return: True if this", "def hasHull(self) -> bool: \"\"\"Test whether this BattleShip has any hull HP remaining.", "if this BattleShip's armour HP is greater than 0, False otherwise :rtype: bool", "0 def getShield(self) -> int: \"\"\"Get this BattleShip's remaining shield HP. :return: Integer", "BattleShip's remaining shield HP. :return: Integer amount of shield HP remaining; 0 or", "int: \"\"\"Get this BattleShip's remaining armour HP. :return: Integer amount of armour HP", "remaining armour HP. 
:param int new: Integer new amount of armour HP remaining;", "\"\"\" :param bbShip bbShip: The bbShip for this BattleShip to inherit stats from", "of shield HP remaining; 0 or more. \"\"\" self.shield = new def setArmour(self,", "-> bool: \"\"\"Test whether this BattleShip has any hull HP remaining. :return: True", "a duel. The ship has three health pools; hull, armour and shield. \"\"\"", "equipped, False otherwise :rtype: bool \"\"\" return len(self.cloaks) > 0 def getShield(self) ->", "duel. The ship has three health pools; hull, armour and shield. \"\"\" def", "participting in a duel. The ship has three health pools; hull, armour and", "\"\"\"Set this BattleShip's remaining shield HP. :param int new: Integer new amount of", "Integer new amount of armour HP remaining; 0 or more. \"\"\" self.armour =", "shield HP. :return: Integer amount of shield HP remaining; 0 or more. :rtype:", "bool \"\"\" return self.armour > 0 def hasShield(self) -> bool: \"\"\"Test whether this", "0 or more. :rtype: int \"\"\" return self.hull def setShield(self, new : int):", "greater than 0, False otherwise :rtype: bool \"\"\" return self.hull > 0 def", "True if this BattleShip's armour HP is greater than 0, False otherwise :rtype:", "inherit stats from \"\"\" self.bbShip = bbShip self.hull = bbShip.armour self.armour = bbShip.getArmour()", "self.dps = bbShip.getDPS() self.cloaks = [] self.EMPs = [] self.remainingCloak = 0 self.cloaking", "bool: \"\"\"Test whether this BattleShip has any armour HP remaining. :return: True if", "module equipped, False otherwise :rtype: bool \"\"\" return len(self.cloaks) > 0 def getShield(self)", "def setHull(self, new : int): \"\"\"Set this BattleShip's remaining hull HP. :param int", "whether this BattleShip has any cloak modules equipped. 
:return: True if this BattleShip", "this BattleShip to inherit stats from \"\"\" self.bbShip = bbShip self.hull = bbShip.armour", "bbShip: The bbShip for this BattleShip to inherit stats from \"\"\" self.bbShip =", "from ..items import bbShip class BattleShip: \"\"\"A class representing ships participting in a", "otherwise :rtype: bool \"\"\" return self.hull > 0 def hasArmour(self) -> bool: \"\"\"Test", "HP remaining. :return: True if this BattleShip's armour HP is greater than 0,", "is greater than 0, False otherwise :rtype: bool \"\"\" return self.hull > 0", "armour HP. :param int new: Integer new amount of armour HP remaining; 0", "whether this BattleShip has any shield HP remaining. :return: True if this BattleShip's", "greater than 0, False otherwise :rtype: bool \"\"\" return self.shield > 0 def", "setArmour(self, new : int): \"\"\"Set this BattleShip's remaining armour HP. :param int new:", "-> bool: \"\"\"Test whether this BattleShip has any cloak modules equipped. :return: True", "any hull HP remaining. :return: True if this BattleShip's hull HP is greater", "self.remainingCloak = 0 self.cloaking = False self.EMPCooldown = 0 # TODO: Update to", "new : int): \"\"\"Set this BattleShip's remaining hull HP. :param int new: Integer", "if this BattleShip has at least one cloak module equipped, False otherwise :rtype:", "HP remaining; 0 or more. :rtype: int \"\"\" return self.hull def setShield(self, new", "this BattleShip has any hull HP remaining. :return: True if this BattleShip's hull", "more. \"\"\" self.armour = new def setHull(self, new : int): \"\"\"Set this BattleShip's", "this BattleShip has any shield HP remaining. :return: True if this BattleShip's shield", "setHull(self, new : int): \"\"\"Set this BattleShip's remaining hull HP. :param int new:", "HP. :return: Integer amount of shield HP remaining; 0 or more. 
:rtype: int", "this BattleShip has at least one cloak module equipped, False otherwise :rtype: bool", "\"\"\" return self.shield > 0 def hasCloaks(self) -> bool: \"\"\"Test whether this BattleShip", "this BattleShip has any armour HP remaining. :return: True if this BattleShip's armour", "remaining. :return: True if this BattleShip's hull HP is greater than 0, False", "BattleShip: \"\"\"A class representing ships participting in a duel. The ship has three", "-> bool: \"\"\"Test whether this BattleShip has any shield HP remaining. :return: True", "hull HP is greater than 0, False otherwise :rtype: bool \"\"\" return self.hull", "stats from \"\"\" self.bbShip = bbShip self.hull = bbShip.armour self.armour = bbShip.getArmour() -", "if isinstance(module, bbCloakModule): self.cloaks += module def hasHull(self) -> bool: \"\"\"Test whether this", "this BattleShip's armour HP is greater than 0, False otherwise :rtype: bool \"\"\"", "than 0, False otherwise :rtype: bool \"\"\" return self.hull > 0 def hasArmour(self)", "bool: \"\"\"Test whether this BattleShip has any shield HP remaining. :return: True if", "BattleShip has at least one cloak module equipped, False otherwise :rtype: bool \"\"\"", "modules equipped. :return: True if this BattleShip has at least one cloak module", ":return: Integer amount of armour HP remaining; 0 or more. :rtype: int \"\"\"", "than 0, False otherwise :rtype: bool \"\"\" return self.armour > 0 def hasShield(self)", "int): \"\"\"Set this BattleShip's remaining shield HP. :param int new: Integer new amount", "int: \"\"\"Get this BattleShip's remaining shield HP. 
:return: Integer amount of shield HP", "[] self.remainingCloak = 0 self.cloaking = False self.EMPCooldown = 0 # TODO: Update", "bbShip.getDPS() self.cloaks = [] self.EMPs = [] self.remainingCloak = 0 self.cloaking = False", ":rtype: bool \"\"\" return len(self.cloaks) > 0 def getShield(self) -> int: \"\"\"Get this", "bool \"\"\" return self.hull > 0 def hasArmour(self) -> bool: \"\"\"Test whether this", "\"\"\" return self.hull > 0 def hasArmour(self) -> bool: \"\"\"Test whether this BattleShip", "def hasArmour(self) -> bool: \"\"\"Test whether this BattleShip has any armour HP remaining.", "whether this BattleShip has any armour HP remaining. :return: True if this BattleShip's", ": int): \"\"\"Set this BattleShip's remaining shield HP. :param int new: Integer new", "to use only one cloak module per ship for module in bbShip.modules: if", "> 0 def hasArmour(self) -> bool: \"\"\"Test whether this BattleShip has any armour", ":return: Integer amount of hull HP remaining; 0 or more. :rtype: int \"\"\"", "ships participting in a duel. The ship has three health pools; hull, armour", "len(self.cloaks) > 0 def getShield(self) -> int: \"\"\"Get this BattleShip's remaining shield HP.", "pools; hull, armour and shield. \"\"\" def __init__(self, bbShip : bbShip.bbShip): \"\"\" :param", "new : int): \"\"\"Set this BattleShip's remaining shield HP. :param int new: Integer", "if this BattleShip's hull HP is greater than 0, False otherwise :rtype: bool", "representing ships participting in a duel. The ship has three health pools; hull,", "is greater than 0, False otherwise :rtype: bool \"\"\" return self.armour > 0", "def hasShield(self) -> bool: \"\"\"Test whether this BattleShip has any shield HP remaining.", ":param bbShip bbShip: The bbShip for this BattleShip to inherit stats from \"\"\"", "int new: Integer new amount of hull HP remaining; 0 or more. 
\"\"\"", "self.armour = new def setHull(self, new : int): \"\"\"Set this BattleShip's remaining hull", "module in bbShip.modules: if isinstance(module, bbCloakModule): self.cloaks += module def hasHull(self) -> bool:", "= bbShip.getDPS() self.cloaks = [] self.EMPs = [] self.remainingCloak = 0 self.cloaking =", "otherwise :rtype: bool \"\"\" return len(self.cloaks) > 0 def getShield(self) -> int: \"\"\"Get", "def getHull(self) -> int: \"\"\"Get this BattleShip's remaining hull HP. :return: Integer amount", "any shield HP remaining. :return: True if this BattleShip's shield HP is greater", "return self.shield def getArmour(self) -> int: \"\"\"Get this BattleShip's remaining armour HP. :return:", "..items import bbShip class BattleShip: \"\"\"A class representing ships participting in a duel.", "\"\"\"Test whether this BattleShip has any hull HP remaining. :return: True if this", "\"\"\"A class representing ships participting in a duel. The ship has three health", ":param int new: Integer new amount of hull HP remaining; 0 or more.", ":rtype: bool \"\"\" return self.hull > 0 def hasArmour(self) -> bool: \"\"\"Test whether", "\"\"\" return self.shield def getArmour(self) -> int: \"\"\"Get this BattleShip's remaining armour HP.", "0, False otherwise :rtype: bool \"\"\" return self.shield > 0 def hasCloaks(self) ->", ":rtype: int \"\"\" return self.armour def getHull(self) -> int: \"\"\"Get this BattleShip's remaining", "bbCloakModule): self.cloaks += module def hasHull(self) -> bool: \"\"\"Test whether this BattleShip has", "new : int): \"\"\"Set this BattleShip's remaining armour HP. :param int new: Integer", "Integer amount of hull HP remaining; 0 or more. :rtype: int \"\"\" return", "self.bbShip = bbShip self.hull = bbShip.armour self.armour = bbShip.getArmour() - self.hull self.shield =", "new amount of armour HP remaining; 0 or more. \"\"\" self.armour = new", "self.hull def setShield(self, new : int): \"\"\"Set this BattleShip's remaining shield HP. 
:param", "ship for module in bbShip.modules: if isinstance(module, bbCloakModule): self.cloaks += module def hasHull(self)", "\"\"\" return self.hull def setShield(self, new : int): \"\"\"Set this BattleShip's remaining shield", "HP remaining. :return: True if this BattleShip's shield HP is greater than 0,", "int new: Integer new amount of armour HP remaining; 0 or more. \"\"\"", "hasArmour(self) -> bool: \"\"\"Test whether this BattleShip has any armour HP remaining. :return:", "HP. :param int new: Integer new amount of shield HP remaining; 0 or", "..items.modules import bbCloakModule from ..items import bbShip class BattleShip: \"\"\"A class representing ships", "armour HP remaining. :return: True if this BattleShip's armour HP is greater than", "= 0 self.cloaking = False self.EMPCooldown = 0 # TODO: Update to use", "armour HP is greater than 0, False otherwise :rtype: bool \"\"\" return self.armour", "def __init__(self, bbShip : bbShip.bbShip): \"\"\" :param bbShip bbShip: The bbShip for this", "BattleShip's hull HP is greater than 0, False otherwise :rtype: bool \"\"\" return", "int): \"\"\"Set this BattleShip's remaining armour HP. :param int new: Integer new amount", "= bbShip.getArmour() - self.hull self.shield = bbShip.getShield() self.dps = bbShip.getDPS() self.cloaks = []", "int: \"\"\"Get this BattleShip's remaining hull HP. :return: Integer amount of hull HP", "this BattleShip's remaining shield HP. :param int new: Integer new amount of shield", "hull HP remaining. 
:return: True if this BattleShip's hull HP is greater than", "0 self.cloaking = False self.EMPCooldown = 0 # TODO: Update to use only", "module per ship for module in bbShip.modules: if isinstance(module, bbCloakModule): self.cloaks += module", "HP is greater than 0, False otherwise :rtype: bool \"\"\" return self.shield >", "bbCloakModule from ..items import bbShip class BattleShip: \"\"\"A class representing ships participting in", "least one cloak module equipped, False otherwise :rtype: bool \"\"\" return len(self.cloaks) >", "if this BattleShip's shield HP is greater than 0, False otherwise :rtype: bool", "cloak module equipped, False otherwise :rtype: bool \"\"\" return len(self.cloaks) > 0 def", "otherwise :rtype: bool \"\"\" return self.armour > 0 def hasShield(self) -> bool: \"\"\"Test", ":param int new: Integer new amount of shield HP remaining; 0 or more.", "BattleShip's armour HP is greater than 0, False otherwise :rtype: bool \"\"\" return", "self.armour def getHull(self) -> int: \"\"\"Get this BattleShip's remaining hull HP. :return: Integer", "-> int: \"\"\"Get this BattleShip's remaining armour HP. :return: Integer amount of armour", "int \"\"\" return self.hull def setShield(self, new : int): \"\"\"Set this BattleShip's remaining", "TODO: Update to use only one cloak module per ship for module in", "any armour HP remaining. :return: True if this BattleShip's armour HP is greater", "equipped. :return: True if this BattleShip has at least one cloak module equipped,", "= 0 # TODO: Update to use only one cloak module per ship", "Update to use only one cloak module per ship for module in bbShip.modules:", "or more. 
\"\"\" self.armour = new def setHull(self, new : int): \"\"\"Set this", "[] self.EMPs = [] self.remainingCloak = 0 self.cloaking = False self.EMPCooldown = 0", "0 def hasCloaks(self) -> bool: \"\"\"Test whether this BattleShip has any cloak modules", "one cloak module equipped, False otherwise :rtype: bool \"\"\" return len(self.cloaks) > 0", "from ..items.modules import bbCloakModule from ..items import bbShip class BattleShip: \"\"\"A class representing", "new def setHull(self, new : int): \"\"\"Set this BattleShip's remaining hull HP. :param", "= bbShip self.hull = bbShip.armour self.armour = bbShip.getArmour() - self.hull self.shield = bbShip.getShield()", "return self.hull > 0 def hasArmour(self) -> bool: \"\"\"Test whether this BattleShip has", "this BattleShip's shield HP is greater than 0, False otherwise :rtype: bool \"\"\"", "module def hasHull(self) -> bool: \"\"\"Test whether this BattleShip has any hull HP", "new def setArmour(self, new : int): \"\"\"Set this BattleShip's remaining armour HP. :param", "Integer amount of armour HP remaining; 0 or more. :rtype: int \"\"\" return", "bool \"\"\" return len(self.cloaks) > 0 def getShield(self) -> int: \"\"\"Get this BattleShip's", "has at least one cloak module equipped, False otherwise :rtype: bool \"\"\" return", "self.cloaks += module def hasHull(self) -> bool: \"\"\"Test whether this BattleShip has any", "remaining; 0 or more. \"\"\" self.armour = new def setHull(self, new : int):", "self.shield > 0 def hasCloaks(self) -> bool: \"\"\"Test whether this BattleShip has any", "shield HP. :param int new: Integer new amount of shield HP remaining; 0", "has three health pools; hull, armour and shield. \"\"\" def __init__(self, bbShip :", "remaining hull HP. :return: Integer amount of hull HP remaining; 0 or more.", "int new: Integer new amount of shield HP remaining; 0 or more. \"\"\"", ":return: True if this BattleShip's hull HP is greater than 0, False otherwise", "or more. 
:rtype: int \"\"\" return self.hull def setShield(self, new : int): \"\"\"Set", "self.cloaks = [] self.EMPs = [] self.remainingCloak = 0 self.cloaking = False self.EMPCooldown", "\"\"\"Test whether this BattleShip has any armour HP remaining. :return: True if this", "False otherwise :rtype: bool \"\"\" return self.shield > 0 def hasCloaks(self) -> bool:", "this BattleShip's remaining hull HP. :return: Integer amount of hull HP remaining; 0", "self.shield def getArmour(self) -> int: \"\"\"Get this BattleShip's remaining armour HP. :return: Integer", "ship has three health pools; hull, armour and shield. \"\"\" def __init__(self, bbShip", "return self.armour def getHull(self) -> int: \"\"\"Get this BattleShip's remaining hull HP. :return:", "use only one cloak module per ship for module in bbShip.modules: if isinstance(module,", "Integer new amount of shield HP remaining; 0 or more. \"\"\" self.shield =", "\"\"\" def __init__(self, bbShip : bbShip.bbShip): \"\"\" :param bbShip bbShip: The bbShip for", "new amount of hull HP remaining; 0 or more. \"\"\" self.hull = new", "HP remaining. :return: True if this BattleShip's hull HP is greater than 0,", "\"\"\" self.shield = new def setArmour(self, new : int): \"\"\"Set this BattleShip's remaining", ":return: Integer amount of shield HP remaining; 0 or more. :rtype: int \"\"\"", "only one cloak module per ship for module in bbShip.modules: if isinstance(module, bbCloakModule):", "this BattleShip has any cloak modules equipped. :return: True if this BattleShip has", "more. :rtype: int \"\"\" return self.shield def getArmour(self) -> int: \"\"\"Get this BattleShip's", "BattleShip's remaining hull HP. :param int new: Integer new amount of hull HP", "shield HP is greater than 0, False otherwise :rtype: bool \"\"\" return self.shield", "return len(self.cloaks) > 0 def getShield(self) -> int: \"\"\"Get this BattleShip's remaining shield", "armour HP. :return: Integer amount of armour HP remaining; 0 or more. :rtype:", "more. 
:rtype: int \"\"\" return self.hull def setShield(self, new : int): \"\"\"Set this", "0, False otherwise :rtype: bool \"\"\" return self.hull > 0 def hasArmour(self) ->", "or more. :rtype: int \"\"\" return self.shield def getArmour(self) -> int: \"\"\"Get this", "remaining; 0 or more. \"\"\" self.shield = new def setArmour(self, new : int):", "class BattleShip: \"\"\"A class representing ships participting in a duel. The ship has", "BattleShip has any shield HP remaining. :return: True if this BattleShip's shield HP", "True if this BattleShip has at least one cloak module equipped, False otherwise", "False otherwise :rtype: bool \"\"\" return self.hull > 0 def hasArmour(self) -> bool:", "- self.hull self.shield = bbShip.getShield() self.dps = bbShip.getDPS() self.cloaks = [] self.EMPs =", "int): \"\"\"Set this BattleShip's remaining hull HP. :param int new: Integer new amount", "new: Integer new amount of hull HP remaining; 0 or more. \"\"\" self.hull", "cloak module per ship for module in bbShip.modules: if isinstance(module, bbCloakModule): self.cloaks +=", "> 0 def hasCloaks(self) -> bool: \"\"\"Test whether this BattleShip has any cloak", "> 0 def getShield(self) -> int: \"\"\"Get this BattleShip's remaining shield HP. :return:", "BattleShip has any hull HP remaining. :return: True if this BattleShip's hull HP", "BattleShip has any cloak modules equipped. :return: True if this BattleShip has at", "> 0 def hasShield(self) -> bool: \"\"\"Test whether this BattleShip has any shield", "\"\"\" self.bbShip = bbShip self.hull = bbShip.armour self.armour = bbShip.getArmour() - self.hull self.shield", "self.cloaking = False self.EMPCooldown = 0 # TODO: Update to use only one", "new: Integer new amount of shield HP remaining; 0 or more. \"\"\" self.shield", "than 0, False otherwise :rtype: bool \"\"\" return self.shield > 0 def hasCloaks(self)", "self.hull > 0 def hasArmour(self) -> bool: \"\"\"Test whether this BattleShip has any", "HP. 
:return: Integer amount of armour HP remaining; 0 or more. :rtype: int", "return self.hull def setShield(self, new : int): \"\"\"Set this BattleShip's remaining shield HP.", ": bbShip.bbShip): \"\"\" :param bbShip bbShip: The bbShip for this BattleShip to inherit", "armour HP remaining; 0 or more. \"\"\" self.armour = new def setHull(self, new", "HP remaining; 0 or more. :rtype: int \"\"\" return self.shield def getArmour(self) ->", "any cloak modules equipped. :return: True if this BattleShip has at least one", "for module in bbShip.modules: if isinstance(module, bbCloakModule): self.cloaks += module def hasHull(self) ->", "remaining; 0 or more. :rtype: int \"\"\" return self.hull def setShield(self, new :", "HP. :param int new: Integer new amount of hull HP remaining; 0 or", "shield HP remaining. :return: True if this BattleShip's shield HP is greater than", "has any armour HP remaining. :return: True if this BattleShip's armour HP is", "def setArmour(self, new : int): \"\"\"Set this BattleShip's remaining armour HP. :param int", "= [] self.remainingCloak = 0 self.cloaking = False self.EMPCooldown = 0 # TODO:", "\"\"\"Get this BattleShip's remaining shield HP. :return: Integer amount of shield HP remaining;", "remaining hull HP. :param int new: Integer new amount of hull HP remaining;", "to inherit stats from \"\"\" self.bbShip = bbShip self.hull = bbShip.armour self.armour =", "hasHull(self) -> bool: \"\"\"Test whether this BattleShip has any hull HP remaining. :return:", "-> int: \"\"\"Get this BattleShip's remaining hull HP. :return: Integer amount of hull", "remaining; 0 or more. :rtype: int \"\"\" return self.armour def getHull(self) -> int:", "HP remaining; 0 or more. \"\"\" self.shield = new def setArmour(self, new :", ": int): \"\"\"Set this BattleShip's remaining hull HP. :param int new: Integer new", "three health pools; hull, armour and shield. 
\"\"\" def __init__(self, bbShip : bbShip.bbShip):", "bbShip : bbShip.bbShip): \"\"\" :param bbShip bbShip: The bbShip for this BattleShip to", "bbShip bbShip: The bbShip for this BattleShip to inherit stats from \"\"\" self.bbShip", "self.armour > 0 def hasShield(self) -> bool: \"\"\"Test whether this BattleShip has any", "in a duel. The ship has three health pools; hull, armour and shield.", "has any cloak modules equipped. :return: True if this BattleShip has at least", "self.hull self.shield = bbShip.getShield() self.dps = bbShip.getDPS() self.cloaks = [] self.EMPs = []", "remaining. :return: True if this BattleShip's armour HP is greater than 0, False", "hull HP. :param int new: Integer new amount of hull HP remaining; 0", "or more. \"\"\" self.shield = new def setArmour(self, new : int): \"\"\"Set this", "remaining; 0 or more. :rtype: int \"\"\" return self.shield def getArmour(self) -> int:", "\"\"\"Set this BattleShip's remaining hull HP. :param int new: Integer new amount of", "bbShip self.hull = bbShip.armour self.armour = bbShip.getArmour() - self.hull self.shield = bbShip.getShield() self.dps", "= False self.EMPCooldown = 0 # TODO: Update to use only one cloak", "this BattleShip's remaining armour HP. :param int new: Integer new amount of armour", "BattleShip's remaining armour HP. :param int new: Integer new amount of armour HP", "bbShip.getShield() self.dps = bbShip.getDPS() self.cloaks = [] self.EMPs = [] self.remainingCloak = 0", "False otherwise :rtype: bool \"\"\" return self.armour > 0 def hasShield(self) -> bool:", "hull, armour and shield. \"\"\" def __init__(self, bbShip : bbShip.bbShip): \"\"\" :param bbShip", "at least one cloak module equipped, False otherwise :rtype: bool \"\"\" return len(self.cloaks)", "__init__(self, bbShip : bbShip.bbShip): \"\"\" :param bbShip bbShip: The bbShip for this BattleShip", "getShield(self) -> int: \"\"\"Get this BattleShip's remaining shield HP. 
:return: Integer amount of", "bbShip.modules: if isinstance(module, bbCloakModule): self.cloaks += module def hasHull(self) -> bool: \"\"\"Test whether", "0 def hasShield(self) -> bool: \"\"\"Test whether this BattleShip has any shield HP", "shield HP remaining; 0 or more. :rtype: int \"\"\" return self.shield def getArmour(self)", "isinstance(module, bbCloakModule): self.cloaks += module def hasHull(self) -> bool: \"\"\"Test whether this BattleShip", "0 or more. \"\"\" self.shield = new def setArmour(self, new : int): \"\"\"Set", "self.EMPCooldown = 0 # TODO: Update to use only one cloak module per", "armour and shield. \"\"\" def __init__(self, bbShip : bbShip.bbShip): \"\"\" :param bbShip bbShip:", "HP. :return: Integer amount of hull HP remaining; 0 or more. :rtype: int", "HP remaining; 0 or more. :rtype: int \"\"\" return self.armour def getHull(self) ->", "and shield. \"\"\" def __init__(self, bbShip : bbShip.bbShip): \"\"\" :param bbShip bbShip: The", "more. :rtype: int \"\"\" return self.armour def getHull(self) -> int: \"\"\"Get this BattleShip's", "new: Integer new amount of armour HP remaining; 0 or more. \"\"\" self.armour", "bbShip class BattleShip: \"\"\"A class representing ships participting in a duel. The ship", ": int): \"\"\"Set this BattleShip's remaining armour HP. :param int new: Integer new", "Integer new amount of hull HP remaining; 0 or more. \"\"\" self.hull =", "shield. \"\"\" def __init__(self, bbShip : bbShip.bbShip): \"\"\" :param bbShip bbShip: The bbShip", "whether this BattleShip has any hull HP remaining. :return: True if this BattleShip's", "BattleShip has any armour HP remaining. :return: True if this BattleShip's armour HP", "False self.EMPCooldown = 0 # TODO: Update to use only one cloak module", "\"\"\" return len(self.cloaks) > 0 def getShield(self) -> int: \"\"\"Get this BattleShip's remaining", "class representing ships participting in a duel. The ship has three health pools;", "health pools; hull, armour and shield. 
\"\"\" def __init__(self, bbShip : bbShip.bbShip): \"\"\"", "getHull(self) -> int: \"\"\"Get this BattleShip's remaining hull HP. :return: Integer amount of", "has any shield HP remaining. :return: True if this BattleShip's shield HP is", "more. \"\"\" self.shield = new def setArmour(self, new : int): \"\"\"Set this BattleShip's", "HP remaining; 0 or more. \"\"\" self.armour = new def setHull(self, new :", "of armour HP remaining; 0 or more. :rtype: int \"\"\" return self.armour def", "of armour HP remaining; 0 or more. \"\"\" self.armour = new def setHull(self,", "int \"\"\" return self.armour def getHull(self) -> int: \"\"\"Get this BattleShip's remaining hull", "import bbShip class BattleShip: \"\"\"A class representing ships participting in a duel. The", "remaining armour HP. :return: Integer amount of armour HP remaining; 0 or more.", "Integer amount of shield HP remaining; 0 or more. :rtype: int \"\"\" return", "getArmour(self) -> int: \"\"\"Get this BattleShip's remaining armour HP. :return: Integer amount of", "self.armour = bbShip.getArmour() - self.hull self.shield = bbShip.getShield() self.dps = bbShip.getDPS() self.cloaks =", "def hasCloaks(self) -> bool: \"\"\"Test whether this BattleShip has any cloak modules equipped.", "def setShield(self, new : int): \"\"\"Set this BattleShip's remaining shield HP. :param int", "= new def setHull(self, new : int): \"\"\"Set this BattleShip's remaining hull HP.", "new amount of shield HP remaining; 0 or more. \"\"\" self.shield = new", "this BattleShip's remaining armour HP. :return: Integer amount of armour HP remaining; 0", "or more. :rtype: int \"\"\" return self.armour def getHull(self) -> int: \"\"\"Get this", "HP. 
:param int new: Integer new amount of armour HP remaining; 0 or", "self.shield = new def setArmour(self, new : int): \"\"\"Set this BattleShip's remaining armour", "return self.shield > 0 def hasCloaks(self) -> bool: \"\"\"Test whether this BattleShip has", "\"\"\"Get this BattleShip's remaining hull HP. :return: Integer amount of hull HP remaining;", "in bbShip.modules: if isinstance(module, bbCloakModule): self.cloaks += module def hasHull(self) -> bool: \"\"\"Test", "BattleShip's remaining armour HP. :return: Integer amount of armour HP remaining; 0 or", "of hull HP remaining; 0 or more. :rtype: int \"\"\" return self.hull def", "True if this BattleShip's shield HP is greater than 0, False otherwise :rtype:", "cloak modules equipped. :return: True if this BattleShip has at least one cloak", "bool \"\"\" return self.shield > 0 def hasCloaks(self) -> bool: \"\"\"Test whether this", "def getArmour(self) -> int: \"\"\"Get this BattleShip's remaining armour HP. :return: Integer amount", "amount of shield HP remaining; 0 or more. :rtype: int \"\"\" return self.shield", "BattleShip's shield HP is greater than 0, False otherwise :rtype: bool \"\"\" return", "0 def hasArmour(self) -> bool: \"\"\"Test whether this BattleShip has any armour HP", "amount of armour HP remaining; 0 or more. \"\"\" self.armour = new def", ":rtype: bool \"\"\" return self.armour > 0 def hasShield(self) -> bool: \"\"\"Test whether", "The bbShip for this BattleShip to inherit stats from \"\"\" self.bbShip = bbShip", "0 or more. :rtype: int \"\"\" return self.shield def getArmour(self) -> int: \"\"\"Get", "from \"\"\" self.bbShip = bbShip self.hull = bbShip.armour self.armour = bbShip.getArmour() - self.hull", "\"\"\"Set this BattleShip's remaining armour HP. :param int new: Integer new amount of", "The ship has three health pools; hull, armour and shield. 
\"\"\" def __init__(self,", "= [] self.EMPs = [] self.remainingCloak = 0 self.cloaking = False self.EMPCooldown =", "amount of shield HP remaining; 0 or more. \"\"\" self.shield = new def", "BattleShip's remaining hull HP. :return: Integer amount of hull HP remaining; 0 or", "hasShield(self) -> bool: \"\"\"Test whether this BattleShip has any shield HP remaining. :return:", "amount of hull HP remaining; 0 or more. :rtype: int \"\"\" return self.hull", "of shield HP remaining; 0 or more. :rtype: int \"\"\" return self.shield def", "\"\"\"Test whether this BattleShip has any cloak modules equipped. :return: True if this", "\"\"\"Get this BattleShip's remaining armour HP. :return: Integer amount of armour HP remaining;", "self.shield = bbShip.getShield() self.dps = bbShip.getDPS() self.cloaks = [] self.EMPs = [] self.remainingCloak", "bool: \"\"\"Test whether this BattleShip has any hull HP remaining. :return: True if", "bbShip.bbShip): \"\"\" :param bbShip bbShip: The bbShip for this BattleShip to inherit stats", "def getShield(self) -> int: \"\"\"Get this BattleShip's remaining shield HP. :return: Integer amount" ]
[ "= None, io_provider: Optional[statistics_io_impl.StatisticsIOProvider] = None ) -> DatasetListView: \"\"\"Read a sharded DatasetFeatureStatisticsList", "DatasetFeatureStatisticsList proto. \"\"\" stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_text = io_util.read_file_to_string(input_path) text_format.Parse(stats_text, stats_proto) return stats_proto", "File path from which to load the DatasetFeatureStatisticsList proto. Returns: A DatasetFeatureStatisticsList proto.", "def get_slice_stats( stats: statistics_pb2.DatasetFeatureStatisticsList, slice_key: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Get statistics associated with a", "ValueError('Duplicate feature %s' % feature_id) self._feature_map[feature_id] = j for j, cross_feature in enumerate(self._statistics.cross_features):", "Unless required by applicable law or agreed to in writing, software # distributed", "slice_key: result = statistics_pb2.DatasetFeatureStatisticsList() result.datasets.add().CopyFrom(slice_stats) return result raise ValueError('Invalid slice key.') def load_statistics(", "arrow_util.get_innermost_nested_type(arrow_type) if pa.types.is_integer(value_type): return statistics_pb2.FeatureNameStatistics.INT elif pa.types.is_floating(value_type): return statistics_pb2.FeatureNameStatistics.FLOAT elif arrow_util.is_binary_like(value_type): return statistics_pb2.FeatureNameStatistics.STRING", "DatasetFeatureStatisticsList proto. Returns: A DatasetFeatureStatisticsList proto. \"\"\" stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_text = io_util.read_file_to_string(input_path)", "specific slice. Raises: ValueError: If the input statistics proto does not have the", "'struct_stats': return self._statistics.struct_stats return None def common_statistics(self) -> Optional[statistics_pb2.CommonStatistics]: \"\"\"Retrieve common statistics if", "input statistics proto does not have the specified slice statistics. 
\"\"\" for slice_stats", "name=DOMAIN_INFO. DOMAIN_INFO = 'domain_info' # LINT.ThenChange(../anomalies/custom_domain_util.cc) def maybe_get_utf8(value: bytes) -> Optional[Text]: \"\"\"Returns the", "have deterministic ordering feature_paths = sorted(stats_values.keys()) for feature_path in feature_paths: feature_stats_proto = _make_feature_stats_proto(stats_values[feature_path],", "Exception as e: raise e def get_feature_stats(stats: statistics_pb2.DatasetFeatureStatistics, feature_path: types.FeaturePath ) -> statistics_pb2.FeatureNameStatistics:", "common statistics if available.\"\"\" which = self._statistics.WhichOneof('stats') if which == 'num_stats': return self._statistics.num_stats.common_stats", "self._initialized = True def proto(self) -> statistics_pb2.DatasetFeatureStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics", "stats: statistics_pb2.DatasetFeatureStatisticsList, slice_key: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Get statistics associated with a specific slice.", "feature_paths = sorted(stats_values.keys()) for feature_path in feature_paths: feature_stats_proto = _make_feature_stats_proto(stats_values[feature_path], feature_path) new_feature_stats_proto =", "statistics associated with a specific slice. Args: stats: A DatasetFeatureStatisticsList protocol buffer. slice_key:", "in Proto Text Format. Returns: A DatasetFeatureStatisticsList proto. Raises: IOError: If the input", "ValueError('Duplicate feature %s' % feature_id) self._cross_feature_map[feature_id] = j self._initialized = True def proto(self)", "Raises: ValueError if multiple derived features match. \"\"\" # TODO(b/221453427): Consider indexing if", "-> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Get statistics associated with a specific slice. 
Args: stats: A DatasetFeatureStatisticsList", "in self._statistics.datasets: if dataset.name in self._slice_map: raise ValueError('Duplicate slice name %s' % dataset.name)", "available.\"\"\" if self._statistics.WhichOneof('stats') == 'string_stats': return self._statistics.string_stats return None def bytes_statistics(self) -> Optional[statistics_pb2.BytesStatistics]:", "def load_statistics( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data statistics proto from file. Args:", "value decoded as utf-8, or None if it cannot be decoded. Args: value:", "= stats_proto self._slice_map = {} # type: Dict[str, DatasetView] self._initialized = False def", "statistics file path. Returns: A DatasetFeatureStatisticsList proto. \"\"\" it = statistics_io_impl.get_io_provider('tfrecords').record_iterator_impl( [input_path]) result", "one path. Returns: A FeatureView, or None if feature_id is not present. \"\"\"", "value.decode('utf-8') except UnicodeError: return None return decoded_value def get_feature_type( dtype: np.dtype) -> Optional[types.FeatureNameStatisticsType]:", "is None == input_paths is None: raise ValueError('Must provide one of input_paths_prefix, input_paths.')", "Data statistics file path. The file should be a one-record TFRecord file or", "the expected type. 
\"\"\" if not isinstance(stats, statistics_pb2.DatasetFeatureStatisticsList): raise TypeError( 'stats is of", "statistics_pb2.DatasetFeatureStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def get_feature( self, feature_id: Union[str, types.FeaturePath,", "Optional[statistics_pb2.CustomStatistic]: \"\"\"Retrieve a custom_statistic by name.\"\"\" result = None for stat in self._statistics.custom_stats:", "\"\"\" result = statistics_pb2.DatasetFeatureStatistics() # Sort alphabetically by feature name to have deterministic", "# type: Dict[types.FeaturePath, int] self._cross_feature_map = { } # type: Dict[Tuple[types.FeaturePath, types.FeaturePath], int]", "features match. \"\"\" # TODO(b/221453427): Consider indexing if performance becomes an issue. results", "if slice_stats.name == slice_key: result = statistics_pb2.DatasetFeatureStatisticsList() result.datasets.add().CopyFrom(slice_stats) return result raise ValueError('Invalid slice", "plain file containing the statistics proto in Proto Text Format. Returns: A DatasetFeatureStatisticsList", "2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the", "of the dict is the name of the custom statistic and the value", "to have deterministic ordering stat_names = sorted(stats_values.keys()) for stat_name in stat_names: result.custom_stats.add(name=stat_name, num=stats_values[stat_name])", "proto.' % type(feature_stats).__name__) for custom_stats in feature_stats.custom_stats: if custom_stats.name == custom_stats_name: return getattr(custom_stats,", "feature_id = (x_path, y_path) index = self._cross_feature_map.get(feature_id, None) if index is None: return", "Proto Text Format. Returns: A DatasetFeatureStatisticsList proto. Raises: IOError: If the input path", "'bytes_stats': return self._statistics.bytes_stats.common_stats if which == 'struct_stats': return self._statistics.struct_stats.common_stats return None class CrossFeatureView(object):", "of the specific slice. 
Raises: ValueError: If the input statistics proto does not", "Iterable[str] consisting of path steps, or a str, which is converted to a", "index is None: return None return FeatureView(self._statistics.features[index]) def get_cross_feature( self, x_path: Union[str, types.FeaturePath,", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "stats_proto) return stats_proto def load_stats_binary( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads a serialized DatasetFeatureStatisticsList", "if feature.validation_derived_source.deriver_name != deriver_name: continue if (len(source_paths) != len( feature.validation_derived_source.source_path)): continue all_match =", "internal indices. Noop if already initialized.\"\"\" if self._initialized: return field_identifier = None for", "= stat else: raise ValueError('Duplicate custom_stats for name %s' % name) return result", "proto. \"\"\" it = statistics_io_impl.get_io_provider('tfrecords').record_iterator_impl( [input_path]) result = next(it) try: next(it) raise ValueError('load_stats_tfrecord", "load_stats_text(input_path) def _normalize_feature_id( name_or_path_or_steps: Union[str, types.FeaturePath, Iterable[str]] ) -> types.FeaturePath: if isinstance(name_or_path_or_steps, str):", "and # limitations under the License. \"\"\"Utilities for stats generators.\"\"\" from __future__ import", "-> statistics_pb2.CrossFeatureStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def load_sharded_statistics( input_path_prefix: Optional[str] =", "proto. Raises: IOError: If the input path does not exist. 
\"\"\" if not", "_NP_DTYPE_KIND_TO_FEATURE_TYPE.get(dtype.kind) def get_feature_type_from_arrow_type( feature_path: types.FeaturePath, arrow_type: pa.DataType) -> Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature type from", "ordering feature_paths = sorted(stats_values.keys()) for feature_path in feature_paths: feature_stats_proto = _make_feature_stats_proto(stats_values[feature_path], feature_path) new_feature_stats_proto", "\"\"\" def __init__(self, stats_proto: statistics_pb2.FeatureNameStatistics): self._statistics = stats_proto def proto(self) -> statistics_pb2.FeatureNameStatistics: \"\"\"Retrieve", "e: raise e def get_feature_stats(stats: statistics_pb2.DatasetFeatureStatistics, feature_path: types.FeaturePath ) -> statistics_pb2.FeatureNameStatistics: \"\"\"Get feature", "name to have deterministic ordering feature_paths = sorted(stats_values.keys()) for feature_path in feature_paths: feature_stats_proto", "if input_path_prefix is not None: input_paths = io_provider.glob(input_path_prefix) acc = statistics.DatasetListAccumulator() stats_iter =", "None for stat in self._statistics.custom_stats: if stat.name == name: if result is None:", "return stats_proto def load_stats_tfrecord( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data statistics proto from", "import print_function import logging from typing import Dict, Iterable, Optional, Sequence, Text, Tuple,", "DatasetFeatureStatisticsList protocol buffer. slice_key: Slice key of the slice. Returns: Statistics of the", "utf-8, or None if it cannot be decoded. Args: value: The bytes value", "record.') except StopIteration: return result except Exception as e: raise e def get_feature_stats(stats:", "if len(self._slice_map) == 1: for _, v in self._slice_map.items(): return v return self._slice_map.get(constants.DEFAULT_SLICE_KEY,", "should be a ' 'FeatureNameStatistics proto.' 
% type(feature_stats).__name__) for custom_stats in feature_stats.custom_stats: if", "type is not supported. \"\"\" if pa.types.is_null(arrow_type): return None if not arrow_util.is_list_like(arrow_type): raise", "Features specified within the underlying proto by name (instead of path) are normalized", "cross_feature in enumerate(self._statistics.cross_features): feature_id = (types.FeaturePath.from_proto(cross_feature.path_x), types.FeaturePath.from_proto(cross_feature.path_y)) if feature_id in self._cross_feature_map: raise ValueError('Duplicate", "LLC # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "def __init__(self, stats_proto: statistics_pb2.FeatureNameStatistics): self._statistics = stats_proto def proto(self) -> statistics_pb2.FeatureNameStatistics: \"\"\"Retrieve the", "'struct_stats': return self._statistics.struct_stats.common_stats return None class CrossFeatureView(object): \"\"\"View of a single cross feature.\"\"\"", "def get_feature_stats(stats: statistics_pb2.DatasetFeatureStatistics, feature_path: types.FeaturePath ) -> statistics_pb2.FeatureNameStatistics: \"\"\"Get feature statistics from the", "in the feature statistics. \"\"\" if not isinstance(feature_stats, statistics_pb2.FeatureNameStatistics): raise TypeError('feature_stats is of", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "value. 
\"\"\" return _NP_DTYPE_KIND_TO_FEATURE_TYPE.get(dtype.kind) def get_feature_type_from_arrow_type( feature_path: types.FeaturePath, arrow_type: pa.DataType) -> Optional[types.FeatureNameStatisticsType]: \"\"\"Get", "either path or name within a' ' Dataset.') if field_identifier == 'name': feature_id", "datasets (slices).\"\"\" def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatisticsList): self._statistics = stats_proto self._slice_map = {} #", "(instead of path) are normalized to a length 1 path, and can be", "str) -> Optional[statistics_pb2.CustomStatistic]: \"\"\"Retrieve a custom_statistic by name.\"\"\" result = None for stat", "Returns: A DatasetFeatureStatisticsList proto. \"\"\" stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_text = io_util.read_file_to_string(input_path) text_format.Parse(stats_text, stats_proto)", "to support future extension of the proto. \"\"\" def __init__(self, stats_proto: statistics_pb2.FeatureNameStatistics): self._statistics", "if which == 'string_stats': return self._statistics.string_stats.common_stats if which == 'bytes_stats': return self._statistics.bytes_stats.common_stats if", "from typing import Dict, Iterable, Optional, Sequence, Text, Tuple, Union import numpy as", "don't need an index. 
if self._initialized: return for dataset in self._statistics.datasets: if dataset.name", "as a plain ' 'file.', input_path) return load_stats_text(input_path) def _normalize_feature_id( name_or_path_or_steps: Union[str, types.FeaturePath,", "self._initialized = True def proto(self) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics", "stat_names = sorted(stats_values.keys()) for stat_name in stat_names: result.custom_stats.add(name=stat_name, num=stats_values[stat_name]) return result def write_stats_text(stats:", "0.1 }, FeaturePath(('feature_2',)): { 'Mutual Information': 0.8, 'Correlation': 0.6 } } Returns: DatasetFeatureStatistics", "to obtain from the feature statistics proto. Returns: The custom statistic. Raises: TypeError:", "== 'num_stats': return self._statistics.num_stats.common_stats if which == 'string_stats': return self._statistics.string_stats.common_stats if which ==", "class DatasetView(object): \"\"\"View of statistics for a dataset (slice).\"\"\" def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatistics):", "DatasetListView(object): \"\"\"View of statistics for multiple datasets (slices).\"\"\" def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatisticsList): self._statistics", "\"\"\"Lists cross-feature identifiers.\"\"\" self._init_index() return self._cross_feature_map.keys() def get_derived_feature( self, deriver_name: str, source_paths: Sequence[types.FeaturePath])", "proto. \"\"\" def __init__(self, stats_proto: statistics_pb2.FeatureNameStatistics): self._statistics = stats_proto def proto(self) -> statistics_pb2.FeatureNameStatistics:", "statistics (e.g., MI, NLP). 
def numeric_statistics(self) -> Optional[statistics_pb2.NumericStatistics]: \"\"\"Retrieve numeric statistics if available.\"\"\"", "None, input_paths: Optional[Iterable[str]] = None, io_provider: Optional[statistics_io_impl.StatisticsIOProvider] = None ) -> DatasetListView: \"\"\"Read", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "def __init__(self, stats_proto: statistics_pb2.CrossFeatureStatistics): self._statistics = stats_proto def proto(self) -> statistics_pb2.CrossFeatureStatistics: \"\"\"Retrieve the", "feature %s' % feature_id) self._feature_map[feature_id] = j for j, cross_feature in enumerate(self._statistics.cross_features): feature_id", "\"\"\" if input_path_prefix is None == input_paths is None: raise ValueError('Must provide one", "'num_stats': return self._statistics.num_stats return None def string_statistics(self) -> Optional[statistics_pb2.StringStatistics]: \"\"\"Retrieve string statistics if", "Matches validation_derived_source.source_path. Returns: FeatureView of derived feature. Raises: ValueError if multiple derived features", "return None return FeatureView(self._statistics.features[index]) def get_cross_feature( self, x_path: Union[str, types.FeaturePath, Iterable[str]], y_path: Union[str,", "feature_stats raise ValueError('Feature %s not found in the dataset statistics.' % feature_path) def", "the expected type. ValueError: If the custom statistic is not found in the", "self._cross_feature_map[feature_id] = j self._initialized = True def proto(self) -> statistics_pb2.DatasetFeatureStatistics: \"\"\"Retrieve the underlying", "return self._statistics.num_stats return None def string_statistics(self) -> Optional[statistics_pb2.StringStatistics]: \"\"\"Retrieve string statistics if available.\"\"\"", "statistics. \"\"\" if not isinstance(feature_stats, statistics_pb2.FeatureNameStatistics): raise TypeError('feature_stats is of type %s, should", "proto. 
Returns: A DatasetFeatureStatisticsList proto. \"\"\" stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_text = io_util.read_file_to_string(input_path) text_format.Parse(stats_text,", "feature_stats: statistics_pb2.FeatureNameStatistics, custom_stats_name: Text ) -> Union[float, Text, statistics_pb2.Histogram, statistics_pb2.RankHistogram]: \"\"\"Get custom statistics", "-> statistics_pb2.FeatureNameStatistics: \"\"\"Creates the FeatureNameStatistics proto for one feature. Args: stats_values: A Dict[str,float]", "available.\"\"\" if self._statistics.WhichOneof('stats') == 'struct_stats': return self._statistics.struct_stats return None def common_statistics(self) -> Optional[statistics_pb2.CommonStatistics]:", "def get_feature_type_from_arrow_type( feature_path: types.FeaturePath, arrow_type: pa.DataType) -> Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature type from Arrow", "The value decoded as utf-8, or None, if the value cannot be decoded.", "feature_stats in stats.features: if feature_path == types.FeaturePath.from_proto(feature_stats.path): return feature_stats raise ValueError('Feature %s not", "isinstance(name_or_path_or_steps, types.FeaturePath): return name_or_path_or_steps return types.FeaturePath(name_or_path_or_steps) class DatasetListView(object): \"\"\"View of statistics for multiple", "import arrow_util from tensorflow_data_validation.utils import statistics_io_impl from tensorflow_data_validation.utils import io_util from tfx_bsl import", "\"\"\" for slice_stats in stats.datasets: if slice_stats.name == slice_key: result = statistics_pb2.DatasetFeatureStatisticsList() result.datasets.add().CopyFrom(slice_stats)", "-> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data statistics proto from TFRecord file. 
Args: input_path: Data statistics", "be a ' '(Large)List<primitive|struct> or null, but feature {} ' 'was {}.'.format(feature_path, arrow_type))", "from __future__ import division from __future__ import print_function import logging from typing import", "path. Returns: A DatasetFeatureStatisticsList proto. \"\"\" it = statistics_io_impl.get_io_provider('tfrecords').record_iterator_impl( [input_path]) result = next(it)", "-> Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature type from Arrow type. Args: feature_path: path of the", "stats: A DatasetFeatureStatisticsList protocol buffer. slice_key: Slice key of the slice. Returns: Statistics", "feature_stats: A FeatureNameStatistics protocol buffer. custom_stats_name: The name of the custom statistics to", "path) are normalized to a length 1 path, and can be referred to", "dataset. \"\"\" result = statistics_pb2.DatasetFeatureStatistics() # Sort alphabetically by feature name to have", "as utf-8, or None, if the value cannot be decoded. \"\"\" try: decoded_value", "load_stats_text( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads the specified DatasetFeatureStatisticsList proto stored in text", "'Mutual Information': 0.8, 'Correlation': 0.6 } } Returns: DatasetFeatureStatistics proto containing the custom", "statistics_pb2.DatasetFeatureStatisticsList, slice_key: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Get statistics associated with a specific slice. 
Args:", "deterministic ordering stat_names = sorted(stats_values.keys()) for stat_name in stat_names: result.custom_stats.add(name=stat_name, num=stats_values[stat_name]) return result", "-> statistics_pb2.DatasetFeatureStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def get_feature( self, feature_id: Union[str,", "# LINT.ThenChange(../anomalies/custom_domain_util.cc) def maybe_get_utf8(value: bytes) -> Optional[Text]: \"\"\"Returns the value decoded as utf-8,", "__future__ import absolute_import from __future__ import division from __future__ import print_function import logging", "we don't need an index. if self._initialized: return for dataset in self._statistics.datasets: if", "Args: stats_values: A Dict[str,float] where the key of the dict is the name", "feature whose statistics to obtain from the dataset statistics. Returns: A FeatureNameStatistics protocol", "specific slice. Args: stats: A DatasetFeatureStatisticsList protocol buffer. slice_key: Slice key of the", "is None: result = stat else: raise ValueError('Duplicate custom_stats for name %s' %", "Returns: A FeatureNameStatistic proto containing the custom statistics for a feature. \"\"\" result", "the input statistics is not of the expected type. ValueError: If the input", "A DatasetFeatureStatisticsList proto. output_path: File path to write the DatasetFeatureStatisticsList proto. 
Raises: TypeError:", "self._init_index() return self._feature_map.keys() def list_cross_features( self) -> Iterable[Tuple[types.FeaturePath, types.FeaturePath]]: \"\"\"Lists cross-feature identifiers.\"\"\" self._init_index()", "len( feature.validation_derived_source.source_path)): continue all_match = True for i in range(len(source_paths)): if (source_paths[i] !=", "a plain ' 'file.', input_path) return load_stats_text(input_path) def _normalize_feature_id( name_or_path_or_steps: Union[str, types.FeaturePath, Iterable[str]]", "= self._feature_map.get(feature_id, None) if index is None: return None return FeatureView(self._statistics.features[index]) def get_cross_feature(", "io_provider: Optional StatisticsIOProvider. If unset, a default will be constructed. Returns: A DatasetListView", "== types.FeaturePath.from_proto(feature_stats.path): return feature_stats raise ValueError('Feature %s not found in the dataset statistics.'", "%s' % dataset.name) self._slice_map[dataset.name] = DatasetView(dataset) self._initialized = True def proto(self) -> statistics_pb2.DatasetFeatureStatisticsList:", "deriver. Matches validation_derived_source deriver_name. source_paths: Source paths for derived features. Matches validation_derived_source.source_path. Returns:", "for stat in self._statistics.custom_stats: if stat.name == name: if result is None: result", "deriver_name: continue if (len(source_paths) != len( feature.validation_derived_source.source_path)): continue all_match = True for i", "tensorflow_data_validation.utils import io_util from tfx_bsl import statistics from google.protobuf import text_format from tensorflow_metadata.proto.v0", "= io_util.read_file_to_string(input_path) text_format.Parse(stats_text, stats_proto) return stats_proto def load_stats_binary( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads", "The custom statistic. Raises: TypeError: If the input feature statistics is not of", "\"\"\"View of a single feature. 
This class provides accessor methods, as well as", "\"\"\"Creates the FeatureNameStatistics proto for one feature. Args: stats_values: A Dict[str,float] where the", "FeatureNameStatistics protocol buffer. Raises: TypeError: If the input statistics is not of the", "not use this file except in compliance with the License. # You may", "statistic name to have deterministic ordering stat_names = sorted(stats_values.keys()) for stat_name in stat_names:", "for derived features. Matches validation_derived_source.source_path. Returns: FeatureView of derived feature. Raises: ValueError if", "TFRecord. Try reading as a plain ' 'file.', input_path) return load_stats_text(input_path) def _normalize_feature_id(", "name_or_path_or_steps return types.FeaturePath(name_or_path_or_steps) class DatasetListView(object): \"\"\"View of statistics for multiple datasets (slices).\"\"\" def", "Union[str, types.FeaturePath, Iterable[str]], y_path: Union[str, types.FeaturePath, Iterable[str]] ) -> Optional['CrossFeatureView']: \"\"\"Retrieve a cross-feature", "of x.proto().num_stats) in order to support future extension of the proto. \"\"\" def", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "None def string_statistics(self) -> Optional[statistics_pb2.StringStatistics]: \"\"\"Retrieve string statistics if available.\"\"\" if self._statistics.WhichOneof('stats') ==", "name: if result is None: result = stat else: raise ValueError('Duplicate custom_stats for", "feature statistics proto. Returns: The custom statistic. 
Raises: TypeError: If the input feature", "statistics_io_impl from tensorflow_data_validation.utils import io_util from tfx_bsl import statistics from google.protobuf import text_format", "!= types.FeaturePath.from_proto( feature.validation_derived_source.source_path[i])): all_match = False break if all_match: results.append(FeatureView(feature)) if len(results) >", "stat_name in stat_names: result.custom_stats.add(name=stat_name, num=stats_values[stat_name]) return result def write_stats_text(stats: statistics_pb2.DatasetFeatureStatisticsList, output_path: Text) ->", "are normalized to a length 1 path, and can be referred to as", "'DatasetFeatureStatisticsList proto.' % type(stats).__name__) stats_proto_text = text_format.MessageToString(stats) io_util.write_string_to_file(output_path, stats_proto_text) def load_stats_text( input_path: Text)", ") -> statistics_pb2.FeatureNameStatistics: \"\"\"Get feature statistics from the dataset statistics. Args: stats: A", "stat else: raise ValueError('Duplicate custom_stats for name %s' % name) return result #", "input_paths.') if io_provider is None: io_provider = statistics_io_impl.get_io_provider() if input_path_prefix is not None:", "agreed to in writing, software # distributed under the License is distributed on", "governing permissions and # limitations under the License. \"\"\"Utilities for stats generators.\"\"\" from", "feature_id) self._feature_map[feature_id] = j for j, cross_feature in enumerate(self._statistics.cross_features): feature_id = (types.FeaturePath.from_proto(cross_feature.path_x), types.FeaturePath.from_proto(cross_feature.path_y))", "-> DatasetListView: \"\"\"Read a sharded DatasetFeatureStatisticsList from disk as a DatasetListView. Args: input_path_prefix:", "for stats_list in stats_iter: for dataset in stats_list.datasets: acc.MergeDatasetFeatureStatistics(dataset.SerializeToString()) stats = statistics_pb2.DatasetFeatureStatisticsList() stats.ParseFromString(acc.Get())", "derived features match. 
\"\"\" # TODO(b/221453427): Consider indexing if performance becomes an issue.", "np import pyarrow as pa import tensorflow as tf from tensorflow_data_validation import constants", "'Features must be specified with either path or name within a' ' Dataset.')", "and values are Dicts with keys denoting name of the custom statistic and", "result, %d features matched' % len(results)) if len(results) == 1: return results.pop() return", "= statistics_io_impl.get_io_provider() if input_path_prefix is not None: input_paths = io_provider.glob(input_path_prefix) acc = statistics.DatasetListAccumulator()", "types.FeaturePath(name_or_path_or_steps) class DatasetListView(object): \"\"\"View of statistics for multiple datasets (slices).\"\"\" def __init__(self, stats_proto:", "load_statistics( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data statistics proto from file. Args: input_path:", "statistics_pb2.DatasetFeatureStatistics): self._feature_map = {} # type: Dict[types.FeaturePath, int] self._cross_feature_map = { } #", "types.FeaturePath([feature.name]) else: feature_id = types.FeaturePath.from_proto(feature.path) if feature_id in self._feature_map: raise ValueError('Duplicate feature %s'", "-> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads a serialized DatasetFeatureStatisticsList proto from a file. Args: input_path: File", "acc = statistics.DatasetListAccumulator() stats_iter = io_provider.record_iterator_impl(input_paths) for stats_list in stats_iter: for dataset in", "return None raise TypeError('Feature {} has unsupported arrow type: {}'.format( feature_path, arrow_type)) def", "be referred to as such. Args: feature_id: A types.FeaturePath, Iterable[str] consisting of path", "a default will be constructed. Returns: A DatasetListView containing the merged proto. \"\"\"", "proto. \"\"\" if input_path_prefix is None == input_paths is None: raise ValueError('Must provide", "means it cannot be determined for now). 
Raises: TypeError: if the type is", "constructed. Returns: A DatasetListView containing the merged proto. \"\"\" if input_path_prefix is None", "is of type %s, should be a ' 'FeatureNameStatistics proto.' % type(feature_stats).__name__) for", "statistics_pb2.DatasetFeatureStatistics, feature_path: types.FeaturePath ) -> statistics_pb2.FeatureNameStatistics: \"\"\"Get feature statistics from the dataset statistics.", "logging from typing import Dict, Iterable, Optional, Sequence, Text, Tuple, Union import numpy", "statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data statistics proto from file. Args: input_path: Data statistics file path.", "DatasetFeatureStatisticsList protos. io_provider: Optional StatisticsIOProvider. If unset, a default will be constructed. Returns:", "{}.'.format(input_path)) try: return load_stats_tfrecord(input_path) except Exception: # pylint: disable=broad-except logging.info('File %s did not", "feature column to be a ' '(Large)List<primitive|struct> or null, but feature {} '", "the DatasetFeatureStatisticsList proto. Returns: A DatasetFeatureStatisticsList proto. \"\"\" stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_text =", "statistics.' % feature_path) def get_custom_stats( feature_stats: statistics_pb2.FeatureNameStatistics, custom_stats_name: Text ) -> Union[float, Text,", "input_path: Data statistics file path. The file should be a one-record TFRecord file", "protocol buffer. slice_key: Slice key of the slice. Returns: Statistics of the specific", "return load_stats_text(input_path) def _normalize_feature_id( name_or_path_or_steps: Union[str, types.FeaturePath, Iterable[str]] ) -> types.FeaturePath: if isinstance(name_or_path_or_steps,", "raise ValueError('Must provide one of input_paths_prefix, input_paths.') if io_provider is None: io_provider =", "derived feature. Raises: ValueError if multiple derived features match. 
\"\"\" # TODO(b/221453427): Consider", "io_util from tfx_bsl import statistics from google.protobuf import text_format from tensorflow_metadata.proto.v0 import statistics_pb2", "of type %s, should be a ' 'DatasetFeatureStatistics proto.' % type(stats).__name__) for feature_stats", "normalized to a length 1 path, and can be referred to as such.", "validation_derived_source deriver_name. source_paths: Source paths for derived features. Matches validation_derived_source.source_path. Returns: FeatureView of", "stats.datasets: if slice_stats.name == slice_key: result = statistics_pb2.DatasetFeatureStatisticsList() result.datasets.add().CopyFrom(slice_stats) return result raise ValueError('Invalid", "if (source_paths[i] != types.FeaturePath.from_proto( feature.validation_derived_source.source_path[i])): all_match = False break if all_match: results.append(FeatureView(feature)) if", "name within a' ' Dataset.') if field_identifier == 'name': feature_id = types.FeaturePath([feature.name]) else:", "keys denoting name of the custom statistic and values denoting the value of", "Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0", "as utf-8, or None if it cannot be decoded. Args: value: The bytes", "null (which means it cannot be determined for now). Raises: TypeError: if the", "to in writing, software # distributed under the License is distributed on an", "used in place of proto access (for example, x.numeric_statistics() instead of x.proto().num_stats) in", "None: input_paths = io_provider.glob(input_path_prefix) acc = statistics.DatasetListAccumulator() stats_iter = io_provider.record_iterator_impl(input_paths) for stats_list in", "implied. # See the License for the specific language governing permissions and #", "next(it) try: next(it) raise ValueError('load_stats_tfrecord expects a single record.') except StopIteration: return result", "custom statistics for a feature. 
\"\"\" result = statistics_pb2.FeatureNameStatistics() result.path.CopyFrom(feature_path.to_proto()) # Sort alphabetically", "the dataset statistics. Args: stats: A DatasetFeatureStatistics protocol buffer. feature_path: The path of", "self._cross_feature_map.get(feature_id, None) if index is None: return None return CrossFeatureView(self._statistics.cross_features[index]) def list_features(self) ->", "feature_path: The path of the feature. Returns: A FeatureNameStatistic proto containing the custom", "a deriver name and its inputs. Args: deriver_name: The name of a deriver.", "numeric_statistics(self) -> Optional[statistics_pb2.NumericStatistics]: \"\"\"Retrieve numeric statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'num_stats': return", "types from tensorflow_data_validation.arrow import arrow_util from tensorflow_data_validation.utils import statistics_io_impl from tensorflow_data_validation.utils import io_util", "should be a one-record TFRecord file or a plain file containing the statistics", "merged proto. \"\"\" if input_path_prefix is None == input_paths is None: raise ValueError('Must", "from tensorflow_data_validation.arrow import arrow_util from tensorflow_data_validation.utils import statistics_io_impl from tensorflow_data_validation.utils import io_util from", "Dicts with keys denoting name of the custom statistic and values denoting the", "Arrow type. Args: feature_path: path of the feature. arrow_type: Arrow DataType. Returns: A", "io_util.write_string_to_file(output_path, stats_proto_text) def load_stats_text( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads the specified DatasetFeatureStatisticsList proto", "Args: stats: A DatasetFeatureStatisticsList proto. output_path: File path to write the DatasetFeatureStatisticsList proto.", "\"\"\" # TODO(b/221453427): Consider indexing if performance becomes an issue. 
results = []", "return self._statistics.string_stats.common_stats if which == 'bytes_stats': return self._statistics.bytes_stats.common_stats if which == 'struct_stats': return", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "pa.types.is_null(arrow_type): return None if not arrow_util.is_list_like(arrow_type): raise TypeError('Expected feature column to be a", "specified within the underlying proto by name (instead of path) are normalized to", "found in the feature statistics. \"\"\" if not isinstance(feature_stats, statistics_pb2.FeatureNameStatistics): raise TypeError('feature_stats is", "return self._slice_map.keys() class DatasetView(object): \"\"\"View of statistics for a dataset (slice).\"\"\" def __init__(self,", "not supported. \"\"\" if pa.types.is_null(arrow_type): return None if not arrow_util.is_list_like(arrow_type): raise TypeError('Expected feature", "statistics proto in Proto Text Format. Returns: A DatasetFeatureStatisticsList proto. Raises: IOError: If", "in self._feature_map: raise ValueError('Duplicate feature %s' % feature_id) self._feature_map[feature_id] = j for j,", "if (len(source_paths) != len( feature.validation_derived_source.source_path)): continue all_match = True for i in range(len(source_paths)):", "Args: stats_values: A Dict[FeaturePath, Dict[str,float]] where the keys are feature paths, and values", "ValueError('Feature %s not found in the dataset statistics.' % feature_path) def get_custom_stats( feature_stats:", "Numpy dtype. Returns: A statistics_pb2.FeatureNameStatistics.Type value. 
\"\"\" return _NP_DTYPE_KIND_TO_FEATURE_TYPE.get(dtype.kind) def get_feature_type_from_arrow_type( feature_path: types.FeaturePath,", "= result.features.add() new_feature_stats_proto.CopyFrom(feature_stats_proto) return result def _make_feature_stats_proto( stats_values: Dict[Text, float], feature_path: types.FeaturePath) ->", "LINT.IfChange # Semantic domain information can be passed to schema inference using a", "= statistics_pb2.DatasetFeatureStatisticsList() stats_text = io_util.read_file_to_string(input_path) text_format.Parse(stats_text, stats_proto) return stats_proto def load_stats_binary( input_path: Text)", "type %s, should be a ' 'FeatureNameStatistics proto.' % type(feature_stats).__name__) for custom_stats in", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "of statistics for a dataset (slice).\"\"\" def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatistics): self._feature_map = {}", "'(Large)List<primitive|struct> or null, but feature {} ' 'was {}.'.format(feature_path, arrow_type)) value_type = arrow_util.get_innermost_nested_type(arrow_type)", "stats from input dict. Args: stats_values: A Dict[FeaturePath, Dict[str,float]] where the keys are", "'S': statistics_pb2.FeatureNameStatistics.STRING, 'O': statistics_pb2.FeatureNameStatistics.STRING, 'U': statistics_pb2.FeatureNameStatistics.STRING, } # LINT.IfChange # Semantic domain information", "key of the slice. Returns: Statistics of the specific slice. 
Raises: ValueError: If", "name %s' % dataset.name) self._slice_map[dataset.name] = DatasetView(dataset) self._initialized = True def proto(self) ->", "feature_stats_proto = _make_feature_stats_proto(stats_values[feature_path], feature_path) new_feature_stats_proto = result.features.add() new_feature_stats_proto.CopyFrom(feature_stats_proto) return result def _make_feature_stats_proto( stats_values:", "DatasetFeatureStatisticsList proto. \"\"\" stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_proto.ParseFromString(io_util.read_file_to_string( input_path, binary_mode=True)) return stats_proto def load_stats_tfrecord(", "field_identifier = None for j, feature in enumerate(self._statistics.features): if field_identifier is None: field_identifier", "self._statistics def get_slice(self, slice_key: str) -> Optional['DatasetView']: self._init_index() return self._slice_map.get(slice_key, None) def get_default_slice(self)", "custom_stats_name) def get_slice_stats( stats: statistics_pb2.DatasetFeatureStatisticsList, slice_key: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Get statistics associated with", "if input_path_prefix is None == input_paths is None: raise ValueError('Must provide one of", "# Copyright 2018 Google LLC # # Licensed under the Apache License, Version", "statistics. Returns: A FeatureNameStatistics protocol buffer. Raises: TypeError: If the input statistics is", "bytes value to decode. Returns: The value decoded as utf-8, or None, if", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "the feature whose statistics to obtain from the dataset statistics. Returns: A FeatureNameStatistics", "a ' 'FeatureNameStatistics proto.' 
% type(feature_stats).__name__) for custom_stats in feature_stats.custom_stats: if custom_stats.name ==", "type: Dict[str, DatasetView] self._initialized = False def _init_index(self): \"\"\"Initializes internal mappings.\"\"\" # Lazily", "A Dict[str,float] where the key of the dict is the name of the", "with name=DOMAIN_INFO. DOMAIN_INFO = 'domain_info' # LINT.ThenChange(../anomalies/custom_domain_util.cc) def maybe_get_utf8(value: bytes) -> Optional[Text]: \"\"\"Returns", "exist. \"\"\" if not tf.io.gfile.exists(input_path): raise IOError('Invalid input path {}.'.format(input_path)) try: return load_stats_tfrecord(input_path)", "feature.validation_derived_source.deriver_name != deriver_name: continue if (len(source_paths) != len( feature.validation_derived_source.source_path)): continue all_match = True", "be a one-record TFRecord file or a plain file containing the statistics proto", "(slice).\"\"\" def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatistics): self._feature_map = {} # type: Dict[types.FeaturePath, int] self._cross_feature_map", "\"\"\"View of statistics for multiple datasets (slices).\"\"\" def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatisticsList): self._statistics =", "proto(self) -> statistics_pb2.DatasetFeatureStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def get_feature( self, feature_id:", "not arrow_util.is_list_like(arrow_type): raise TypeError('Expected feature column to be a ' '(Large)List<primitive|struct> or null,", "def maybe_get_utf8(value: bytes) -> Optional[Text]: \"\"\"Returns the value decoded as utf-8, or None", "and values denoting the value of the custom statistic for the feature. Ex.", "Iterable[str]] ) -> Optional['CrossFeatureView']: \"\"\"Retrieve a cross-feature if it exists, or None.\"\"\" x_path", "File path to write the DatasetFeatureStatisticsList proto. 
Raises: TypeError: If the input proto", "Information': 0.5, 'Correlation': 0.1 } feature_path: The path of the feature. Returns: A", "class DatasetListView(object): \"\"\"View of statistics for multiple datasets (slices).\"\"\" def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatisticsList):", "source_paths: Sequence[types.FeaturePath]) -> Optional['FeatureView']: \"\"\"Retrieve a derived feature based on a deriver name", "whose statistics to obtain from the dataset statistics. Returns: A FeatureNameStatistics protocol buffer.", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "it exists. Features specified within the underlying proto by name (instead of path)", "input_paths is None: raise ValueError('Must provide one of input_paths_prefix, input_paths.') if io_provider is", "write the DatasetFeatureStatisticsList proto. Raises: TypeError: If the input proto is not of", "feature {} ' 'was {}.'.format(feature_path, arrow_type)) value_type = arrow_util.get_innermost_nested_type(arrow_type) if pa.types.is_integer(value_type): return statistics_pb2.FeatureNameStatistics.INT", "proto(self) -> statistics_pb2.FeatureNameStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def custom_statistic(self, name: str)", "permissions and # limitations under the License. \"\"\"Utilities for stats generators.\"\"\" from __future__", "of files containing sharded DatasetFeatureStatisticsList protos. io_provider: Optional StatisticsIOProvider. If unset, a default", "a plain file containing the statistics proto in Proto Text Format. 
Returns: A", "True def proto(self) -> statistics_pb2.DatasetFeatureStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def get_feature(", "generators.\"\"\" from __future__ import absolute_import from __future__ import division from __future__ import print_function", "from google.protobuf import text_format from tensorflow_metadata.proto.v0 import statistics_pb2 _NP_DTYPE_KIND_TO_FEATURE_TYPE = { 'f': statistics_pb2.FeatureNameStatistics.FLOAT,", "get_derived_feature( self, deriver_name: str, source_paths: Sequence[types.FeaturePath]) -> Optional['FeatureView']: \"\"\"Retrieve a derived feature based", "io_util.read_file_to_string(input_path) text_format.Parse(stats_text, stats_proto) return stats_proto def load_stats_binary( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads a", "unset, a default will be constructed. Returns: A DatasetListView containing the merged proto.", "is not present. \"\"\" feature_id = _normalize_feature_id(feature_id) self._init_index() index = self._feature_map.get(feature_id, None) if", "Returns: A DatasetFeatureStatisticsList proto. \"\"\" it = statistics_io_impl.get_io_provider('tfrecords').record_iterator_impl( [input_path]) result = next(it) try:", "== 'struct_stats': return self._statistics.struct_stats return None def common_statistics(self) -> Optional[statistics_pb2.CommonStatistics]: \"\"\"Retrieve common statistics", "feature statistics. Args: feature_stats: A FeatureNameStatistics protocol buffer. custom_stats_name: The name of the", "within the underlying proto by name (instead of path) are normalized to a", "name of the custom statistics to obtain from the feature statistics proto. Returns:", "first-party custom # statistics (e.g., MI, NLP). 
def numeric_statistics(self) -> Optional[statistics_pb2.NumericStatistics]: \"\"\"Retrieve numeric", "types.FeaturePath.from_proto( feature.validation_derived_source.source_path[i])): all_match = False break if all_match: results.append(FeatureView(feature)) if len(results) > 1:", "len(results)) if len(results) == 1: return results.pop() return None class FeatureView(object): \"\"\"View of", "If the input feature is not found in the dataset statistics. \"\"\" if", "logging.info('File %s did not look like a TFRecord. Try reading as a plain", "(which means it cannot be determined for now). Raises: TypeError: if the type", "feature statistics is not of the expected type. ValueError: If the custom statistic", "None: field_identifier = feature.WhichOneof('field_id') elif feature.WhichOneof('field_id') != field_identifier: raise ValueError( 'Features must be", "statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'string_stats': return self._statistics.string_stats return None def bytes_statistics(self)", "_NP_DTYPE_KIND_TO_FEATURE_TYPE = { 'f': statistics_pb2.FeatureNameStatistics.FLOAT, 'i': statistics_pb2.FeatureNameStatistics.INT, 'u': statistics_pb2.FeatureNameStatistics.INT, 'S': statistics_pb2.FeatureNameStatistics.STRING, 'O': statistics_pb2.FeatureNameStatistics.STRING,", "Args: deriver_name: The name of a deriver. Matches validation_derived_source deriver_name. 
source_paths: Source paths", "slice name %s' % dataset.name) self._slice_map[dataset.name] = DatasetView(dataset) self._initialized = True def proto(self)", "return self._statistics def load_sharded_statistics( input_path_prefix: Optional[str] = None, input_paths: Optional[Iterable[str]] = None, io_provider:", "return self._statistics def custom_statistic(self, name: str) -> Optional[statistics_pb2.CustomStatistic]: \"\"\"Retrieve a custom_statistic by name.\"\"\"", "io_provider = statistics_io_impl.get_io_provider() if input_path_prefix is not None: input_paths = io_provider.glob(input_path_prefix) acc =", "field_identifier = feature.WhichOneof('field_id') elif feature.WhichOneof('field_id') != field_identifier: raise ValueError( 'Features must be specified", "self) -> Iterable[Tuple[types.FeaturePath, types.FeaturePath]]: \"\"\"Lists cross-feature identifiers.\"\"\" self._init_index() return self._cross_feature_map.keys() def get_derived_feature( self,", "Dict[str, DatasetView] self._initialized = False def _init_index(self): \"\"\"Initializes internal mappings.\"\"\" # Lazily initialize", "are feature paths, and values are Dicts with keys denoting name of the", "sorted(stats_values.keys()) for stat_name in stat_names: result.custom_stats.add(name=stat_name, num=stats_values[stat_name]) return result def write_stats_text(stats: statistics_pb2.DatasetFeatureStatisticsList, output_path:", "for _, v in self._slice_map.items(): return v return self._slice_map.get(constants.DEFAULT_SLICE_KEY, None) def list_slices(self) ->", "DatasetFeatureStatisticsList proto. \"\"\" it = statistics_io_impl.get_io_provider('tfrecords').record_iterator_impl( [input_path]) result = next(it) try: next(it) raise", "protos. io_provider: Optional StatisticsIOProvider. If unset, a default will be constructed. Returns: A", "of a single feature. 
This class provides accessor methods, as well as access", "return self._statistics.string_stats return None def bytes_statistics(self) -> Optional[statistics_pb2.BytesStatistics]: \"\"\"Retrieve byte statistics if available.\"\"\"", "to write the DatasetFeatureStatisticsList proto. Raises: TypeError: If the input proto is not", "proto does not have the specified slice statistics. \"\"\" for slice_stats in stats.datasets:", "self._cross_feature_map = { } # type: Dict[Tuple[types.FeaturePath, types.FeaturePath], int] self._statistics = stats_proto self._initialized", "raise ValueError('Ambiguous result, %d features matched' % len(results)) if len(results) == 1: return", "'U': statistics_pb2.FeatureNameStatistics.STRING, } # LINT.IfChange # Semantic domain information can be passed to", "-> Optional[statistics_pb2.CustomStatistic]: \"\"\"Retrieve a custom_statistic by name.\"\"\" result = None for stat in", "ValueError('Ambiguous result, %d features matched' % len(results)) if len(results) == 1: return results.pop()", "path, and can be referred to as such. Args: feature_id: A types.FeaturePath, Iterable[str]", "obtain from the dataset statistics. Returns: A FeatureNameStatistics protocol buffer. Raises: TypeError: If", "self._statistics.string_stats return None def bytes_statistics(self) -> Optional[statistics_pb2.BytesStatistics]: \"\"\"Retrieve byte statistics if available.\"\"\" if", "UnicodeError: return None return decoded_value def get_feature_type( dtype: np.dtype) -> Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature", "value or None if arrow_type is null (which means it cannot be determined", "passed, loads files starting with this prefix and ending with a pattern corresponding", "should be a ' 'DatasetFeatureStatistics proto.' 
% type(stats).__name__) for feature_stats in stats.features: if", "from __future__ import absolute_import from __future__ import division from __future__ import print_function import", "\"\"\"Retrieve a custom_statistic by name.\"\"\" result = None for stat in self._statistics.custom_stats: if", "containing the custom statistics for each feature in the dataset. \"\"\" result =", "-> Optional[Text]: \"\"\"Returns the value decoded as utf-8, or None if it cannot", "ValueError: If the custom statistic is not found in the feature statistics. \"\"\"", "len(self._slice_map) == 1: for _, v in self._slice_map.items(): return v return self._slice_map.get(constants.DEFAULT_SLICE_KEY, None)", "\"\"\"Get feature type from Arrow type. Args: feature_path: path of the feature. arrow_type:", "which to load the DatasetFeatureStatisticsList proto. Returns: A DatasetFeatureStatisticsList proto. \"\"\" stats_proto =", "except Exception as e: raise e def get_feature_stats(stats: statistics_pb2.DatasetFeatureStatistics, feature_path: types.FeaturePath ) ->", "statistics. Args: feature_stats: A FeatureNameStatistics protocol buffer. custom_stats_name: The name of the custom", "types.FeaturePath, arrow_type: pa.DataType) -> Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature type from Arrow type. Args: feature_path:", "a pattern corresponding to the output of the provided io_provider. input_paths: A list", "self._feature_map[feature_id] = j for j, cross_feature in enumerate(self._statistics.cross_features): feature_id = (types.FeaturePath.from_proto(cross_feature.path_x), types.FeaturePath.from_proto(cross_feature.path_y)) if", "ValueError: If the input statistics proto does not have the specified slice statistics.", "stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_text = io_util.read_file_to_string(input_path) text_format.Parse(stats_text, stats_proto) return stats_proto def load_stats_binary( input_path:", "issue. 
results = [] for feature in self.proto().features: if feature.validation_derived_source is None: continue", "not of the expected type. ValueError: If the custom statistic is not found", "slice_stats.name == slice_key: result = statistics_pb2.DatasetFeatureStatisticsList() result.datasets.add().CopyFrom(slice_stats) return result raise ValueError('Invalid slice key.')", "stat in self._statistics.custom_stats: if stat.name == name: if result is None: result =", "input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data statistics proto from file. Args: input_path: Data", "name of the custom statistic and the value is the numeric value of", "such. Args: feature_id: A types.FeaturePath, Iterable[str] consisting of path steps, or a str,", "[] for feature in self.proto().features: if feature.validation_derived_source is None: continue if feature.validation_derived_source.deriver_name !=", "specified with either path or name within a' ' Dataset.') if field_identifier ==", "from tensorflow_data_validation.utils import statistics_io_impl from tensorflow_data_validation.utils import io_util from tfx_bsl import statistics from", "def struct_statistics(self) -> Optional[statistics_pb2.StructStatistics]: \"\"\"Retrieve struct statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'struct_stats':", "a TFRecord. 
Try reading as a plain ' 'file.', input_path) return load_stats_text(input_path) def", "statistics_pb2.FeatureNameStatistics.STRING, 'O': statistics_pb2.FeatureNameStatistics.STRING, 'U': statistics_pb2.FeatureNameStatistics.STRING, } # LINT.IfChange # Semantic domain information can", "self._feature_map: raise ValueError('Duplicate feature %s' % feature_id) self._feature_map[feature_id] = j for j, cross_feature", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "== 'bytes_stats': return self._statistics.bytes_stats return None def struct_statistics(self) -> Optional[statistics_pb2.StructStatistics]: \"\"\"Retrieve struct statistics", "proto. Where possible, accessors should be used in place of proto access (for", "if self._initialized: return field_identifier = None for j, feature in enumerate(self._statistics.features): if field_identifier", "= sorted(stats_values.keys()) for stat_name in stat_names: result.custom_stats.add(name=stat_name, num=stats_values[stat_name]) return result def write_stats_text(stats: statistics_pb2.DatasetFeatureStatisticsList,", "slice_key: Slice key of the slice. Returns: Statistics of the specific slice. Raises:", "' '(Large)List<primitive|struct> or null, but feature {} ' 'was {}.'.format(feature_path, arrow_type)) value_type =", "domain information can be passed to schema inference using a # CustomStatistic with", "IOError: If the input path does not exist. \"\"\" if not tf.io.gfile.exists(input_path): raise", "proto.\"\"\" return self._statistics def load_sharded_statistics( input_path_prefix: Optional[str] = None, input_paths: Optional[Iterable[str]] = None,", "the slice. Returns: Statistics of the specific slice. 
Raises: ValueError: If the input", "if not isinstance(stats, statistics_pb2.DatasetFeatureStatistics): raise TypeError('statistics is of type %s, should be a", "def proto(self) -> statistics_pb2.CrossFeatureStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def load_sharded_statistics( input_path_prefix:", "elif pa.types.is_null(value_type): return None raise TypeError('Feature {} has unsupported arrow type: {}'.format( feature_path,", "== 'string_stats': return self._statistics.string_stats return None def bytes_statistics(self) -> Optional[statistics_pb2.BytesStatistics]: \"\"\"Retrieve byte statistics", "statistics_pb2.FeatureNameStatistics.Type value or None if arrow_type is null (which means it cannot be", "Sort alphabetically by feature name to have deterministic ordering feature_paths = sorted(stats_values.keys()) for", "Noop if already initialized.\"\"\" if self._initialized: return field_identifier = None for j, feature", "path of the feature. Returns: A FeatureNameStatistic proto containing the custom statistics for", "def get_feature_type( dtype: np.dtype) -> Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature type from numpy dtype. Args:", "from which to load the DatasetFeatureStatisticsList proto. Returns: A DatasetFeatureStatisticsList proto. 
\"\"\" stats_proto", ") -> types.FeaturePath: if isinstance(name_or_path_or_steps, str): return types.FeaturePath([name_or_path_or_steps]) if isinstance(name_or_path_or_steps, types.FeaturePath): return name_or_path_or_steps", "See the License for the specific language governing permissions and # limitations under", "def numeric_statistics(self) -> Optional[statistics_pb2.NumericStatistics]: \"\"\"Retrieve numeric statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'num_stats':", "return self._statistics.num_stats.common_stats if which == 'string_stats': return self._statistics.string_stats.common_stats if which == 'bytes_stats': return", "of the provided io_provider. input_paths: A list of file paths of files containing", "\"\"\"Builds DatasetFeatureStatistics proto with custom stats from input dict. Args: stats_values: A Dict[FeaturePath,", "Returns: A DatasetFeatureStatisticsList proto. \"\"\" stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_proto.ParseFromString(io_util.read_file_to_string( input_path, binary_mode=True)) return stats_proto", "with custom stats from input dict. Args: stats_values: A Dict[FeaturePath, Dict[str,float]] where the", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "pa.types.is_null(value_type): return None raise TypeError('Feature {} has unsupported arrow type: {}'.format( feature_path, arrow_type))", "break if all_match: results.append(FeatureView(feature)) if len(results) > 1: raise ValueError('Ambiguous result, %d features", "'domain_info' # LINT.ThenChange(../anomalies/custom_domain_util.cc) def maybe_get_utf8(value: bytes) -> Optional[Text]: \"\"\"Returns the value decoded as", "found in the feature statistics.' % custom_stats_name) def get_slice_stats( stats: statistics_pb2.DatasetFeatureStatisticsList, slice_key: Text)", "proto from a file. Args: input_path: File path from which to load the", "will be constructed. 
Returns: A DatasetListView containing the merged proto. \"\"\" if input_path_prefix", "None.\"\"\" x_path = _normalize_feature_id(x_path) y_path = _normalize_feature_id(y_path) self._init_index() feature_id = (x_path, y_path) index", "= None for j, feature in enumerate(self._statistics.features): if field_identifier is None: field_identifier =", "exists. Features specified within the underlying proto by name (instead of path) are", "if available.\"\"\" if self._statistics.WhichOneof('stats') == 'num_stats': return self._statistics.num_stats return None def string_statistics(self) ->", "% custom_stats_name) def get_slice_stats( stats: statistics_pb2.DatasetFeatureStatisticsList, slice_key: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Get statistics associated", "statistics file path. The file should be a one-record TFRecord file or a", "' 'was {}.'.format(feature_path, arrow_type)) value_type = arrow_util.get_innermost_nested_type(arrow_type) if pa.types.is_integer(value_type): return statistics_pb2.FeatureNameStatistics.INT elif pa.types.is_floating(value_type):", "Args: stats: A DatasetFeatureStatistics protocol buffer. feature_path: The path of the feature whose", "statistics_pb2.FeatureNameStatistics.STRUCT elif pa.types.is_null(value_type): return None raise TypeError('Feature {} has unsupported arrow type: {}'.format(", "by name.\"\"\" result = None for stat in self._statistics.custom_stats: if stat.name == name:", "Args: input_path: File path from which to load the DatasetFeatureStatisticsList proto. Returns: A", "numpy dtype. Args: dtype: Numpy dtype. Returns: A statistics_pb2.FeatureNameStatistics.Type value. \"\"\" return _NP_DTYPE_KIND_TO_FEATURE_TYPE.get(dtype.kind)", "def common_statistics(self) -> Optional[statistics_pb2.CommonStatistics]: \"\"\"Retrieve common statistics if available.\"\"\" which = self._statistics.WhichOneof('stats') if", "of type %s, should be a ' 'DatasetFeatureStatisticsList proto.' 
% type(stats).__name__) stats_proto_text =", "import statistics_io_impl from tensorflow_data_validation.utils import io_util from tfx_bsl import statistics from google.protobuf import", "statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data statistics proto from TFRecord file. Args: input_path: Data statistics file", "numeric statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'num_stats': return self._statistics.num_stats return None def", "y_path: Union[str, types.FeaturePath, Iterable[str]] ) -> Optional['CrossFeatureView']: \"\"\"Retrieve a cross-feature if it exists,", "arrow_type: Arrow DataType. Returns: A statistics_pb2.FeatureNameStatistics.Type value or None if arrow_type is null", "None def bytes_statistics(self) -> Optional[statistics_pb2.BytesStatistics]: \"\"\"Retrieve byte statistics if available.\"\"\" if self._statistics.WhichOneof('stats') ==", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "return self._feature_map.keys() def list_cross_features( self) -> Iterable[Tuple[types.FeaturePath, types.FeaturePath]]: \"\"\"Lists cross-feature identifiers.\"\"\" self._init_index() return", "self._statistics.num_stats return None def string_statistics(self) -> Optional[statistics_pb2.StringStatistics]: \"\"\"Retrieve string statistics if available.\"\"\" if", "= arrow_util.get_innermost_nested_type(arrow_type) if pa.types.is_integer(value_type): return statistics_pb2.FeatureNameStatistics.INT elif pa.types.is_floating(value_type): return statistics_pb2.FeatureNameStatistics.FLOAT elif arrow_util.is_binary_like(value_type): return", "output_path: Text) -> None: \"\"\"Writes a DatasetFeatureStatisticsList proto to a file in text", "-> None: \"\"\"Writes a DatasetFeatureStatisticsList proto to a file in text format. Args:", "raise TypeError( 'stats is of type %s, should be a ' 'DatasetFeatureStatisticsList proto.'", "the input feature statistics is not of the expected type. 
ValueError: If the", "if which == 'num_stats': return self._statistics.num_stats.common_stats if which == 'string_stats': return self._statistics.string_stats.common_stats if", "except StopIteration: return result except Exception as e: raise e def get_feature_stats(stats: statistics_pb2.DatasetFeatureStatistics,", "each feature in the dataset. \"\"\" result = statistics_pb2.DatasetFeatureStatistics() # Sort alphabetically by", "pylint: disable=broad-except logging.info('File %s did not look like a TFRecord. Try reading as", "available.\"\"\" if self._statistics.WhichOneof('stats') == 'num_stats': return self._statistics.num_stats return None def string_statistics(self) -> Optional[statistics_pb2.StringStatistics]:", "return None def common_statistics(self) -> Optional[statistics_pb2.CommonStatistics]: \"\"\"Retrieve common statistics if available.\"\"\" which =", "arrow_type)) def make_dataset_feature_stats_proto( stats_values: Dict[types.FeaturePath, Dict[Text, float]] ) -> statistics_pb2.DatasetFeatureStatistics: \"\"\"Builds DatasetFeatureStatistics proto", "Iterable[types.FeaturePath]: \"\"\"Lists feature identifiers.\"\"\" self._init_index() return self._feature_map.keys() def list_cross_features( self) -> Iterable[Tuple[types.FeaturePath, types.FeaturePath]]:", "True for i in range(len(source_paths)): if (source_paths[i] != types.FeaturePath.from_proto( feature.validation_derived_source.source_path[i])): all_match = False", "_make_feature_stats_proto(stats_values[feature_path], feature_path) new_feature_stats_proto = result.features.add() new_feature_stats_proto.CopyFrom(feature_stats_proto) return result def _make_feature_stats_proto( stats_values: Dict[Text, float],", "proto is not of the expected type. 
\"\"\" if not isinstance(stats, statistics_pb2.DatasetFeatureStatisticsList): raise", "self._slice_map: raise ValueError('Duplicate slice name %s' % dataset.name) self._slice_map[dataset.name] = DatasetView(dataset) self._initialized =", "import text_format from tensorflow_metadata.proto.v0 import statistics_pb2 _NP_DTYPE_KIND_TO_FEATURE_TYPE = { 'f': statistics_pb2.FeatureNameStatistics.FLOAT, 'i': statistics_pb2.FeatureNameStatistics.INT,", "a ' '(Large)List<primitive|struct> or null, but feature {} ' 'was {}.'.format(feature_path, arrow_type)) value_type", "name.\"\"\" result = None for stat in self._statistics.custom_stats: if stat.name == name: if", "is null (which means it cannot be determined for now). Raises: TypeError: if", "self._statistics.num_stats.common_stats if which == 'string_stats': return self._statistics.string_stats.common_stats if which == 'bytes_stats': return self._statistics.bytes_stats.common_stats", "for retrieving first-party custom # statistics (e.g., MI, NLP). 
def numeric_statistics(self) -> Optional[statistics_pb2.NumericStatistics]:", "def get_cross_feature( self, x_path: Union[str, types.FeaturePath, Iterable[str]], y_path: Union[str, types.FeaturePath, Iterable[str]] ) ->", "def list_features(self) -> Iterable[types.FeaturePath]: \"\"\"Lists feature identifiers.\"\"\" self._init_index() return self._feature_map.keys() def list_cross_features( self)", "type: Dict[Tuple[types.FeaturePath, types.FeaturePath], int] self._statistics = stats_proto self._initialized = False def _init_index(self): \"\"\"Initializes", "stat.name == name: if result is None: result = stat else: raise ValueError('Duplicate", "self._feature_map = {} # type: Dict[types.FeaturePath, int] self._cross_feature_map = { } # type:", "if feature_id in self._cross_feature_map: raise ValueError('Duplicate feature %s' % feature_id) self._cross_feature_map[feature_id] = j", "DOMAIN_INFO = 'domain_info' # LINT.ThenChange(../anomalies/custom_domain_util.cc) def maybe_get_utf8(value: bytes) -> Optional[Text]: \"\"\"Returns the value", "it cannot be decoded. Args: value: The bytes value to decode. Returns: The", "} feature_path: The path of the feature. Returns: A FeatureNameStatistic proto containing the", "return None class FeatureView(object): \"\"\"View of a single feature. This class provides accessor", "self._statistics.bytes_stats return None def struct_statistics(self) -> Optional[statistics_pb2.StructStatistics]: \"\"\"Retrieve struct statistics if available.\"\"\" if", "a serialized DatasetFeatureStatisticsList proto from a file. Args: input_path: File path from which", "or None, if the value cannot be decoded. \"\"\" try: decoded_value = value.decode('utf-8')", "DatasetFeatureStatisticsList proto. Returns: A DatasetFeatureStatisticsList proto. \"\"\" stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_proto.ParseFromString(io_util.read_file_to_string( input_path, binary_mode=True))", "statistics proto from TFRecord file. 
Args: input_path: Data statistics file path. Returns: A", "'Mutual Information': 0.5, 'Correlation': 0.1 } feature_path: The path of the feature. Returns:", "input_path_prefix: Optional[str] = None, input_paths: Optional[Iterable[str]] = None, io_provider: Optional[statistics_io_impl.StatisticsIOProvider] = None )", "for a feature. \"\"\" result = statistics_pb2.FeatureNameStatistics() result.path.CopyFrom(feature_path.to_proto()) # Sort alphabetically by statistic", "type: {}'.format( feature_path, arrow_type)) def make_dataset_feature_stats_proto( stats_values: Dict[types.FeaturePath, Dict[Text, float]] ) -> statistics_pb2.DatasetFeatureStatistics:", "%s not found in the dataset statistics.' % feature_path) def get_custom_stats( feature_stats: statistics_pb2.FeatureNameStatistics,", "an index. if self._initialized: return for dataset in self._statistics.datasets: if dataset.name in self._slice_map:", "if feature_id in self._feature_map: raise ValueError('Duplicate feature %s' % feature_id) self._feature_map[feature_id] = j", "Text) -> None: \"\"\"Writes a DatasetFeatureStatisticsList proto to a file in text format.", "is None: io_provider = statistics_io_impl.get_io_provider() if input_path_prefix is not None: input_paths = io_provider.glob(input_path_prefix)", "should be a ' 'DatasetFeatureStatisticsList proto.' % type(stats).__name__) stats_proto_text = text_format.MessageToString(stats) io_util.write_string_to_file(output_path, stats_proto_text)", "matched' % len(results)) if len(results) == 1: return results.pop() return None class FeatureView(object):", "def proto(self) -> statistics_pb2.FeatureNameStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def custom_statistic(self, name:", "of the expected type. \"\"\" if not isinstance(stats, statistics_pb2.DatasetFeatureStatisticsList): raise TypeError( 'stats is", "the custom statistics for each feature in the dataset. 
\"\"\" result = statistics_pb2.DatasetFeatureStatistics()", "get_custom_stats( feature_stats: statistics_pb2.FeatureNameStatistics, custom_stats_name: Text ) -> Union[float, Text, statistics_pb2.Histogram, statistics_pb2.RankHistogram]: \"\"\"Get custom", "the feature statistics.' % custom_stats_name) def get_slice_stats( stats: statistics_pb2.DatasetFeatureStatisticsList, slice_key: Text) -> statistics_pb2.DatasetFeatureStatisticsList:", "continue all_match = True for i in range(len(source_paths)): if (source_paths[i] != types.FeaturePath.from_proto( feature.validation_derived_source.source_path[i])):", "self._statistics.WhichOneof('stats') == 'string_stats': return self._statistics.string_stats return None def bytes_statistics(self) -> Optional[statistics_pb2.BytesStatistics]: \"\"\"Retrieve byte", "KIND, either express or implied. # See the License for the specific language", "type(feature_stats).__name__) for custom_stats in feature_stats.custom_stats: if custom_stats.name == custom_stats_name: return getattr(custom_stats, custom_stats.WhichOneof('val')) raise", "case we don't need an index. if self._initialized: return for dataset in self._statistics.datasets:", "field_identifier: raise ValueError( 'Features must be specified with either path or name within", "j for j, cross_feature in enumerate(self._statistics.cross_features): feature_id = (types.FeaturePath.from_proto(cross_feature.path_x), types.FeaturePath.from_proto(cross_feature.path_y)) if feature_id in", "input statistics is not of the expected type. ValueError: If the input feature", "by statistic name to have deterministic ordering stat_names = sorted(stats_values.keys()) for stat_name in", "{} ' 'was {}.'.format(feature_path, arrow_type)) value_type = arrow_util.get_innermost_nested_type(arrow_type) if pa.types.is_integer(value_type): return statistics_pb2.FeatureNameStatistics.INT elif", "dataset statistics. Args: stats: A DatasetFeatureStatistics protocol buffer. 
feature_path: The path of the", "for slice_stats in stats.datasets: if slice_stats.name == slice_key: result = statistics_pb2.DatasetFeatureStatisticsList() result.datasets.add().CopyFrom(slice_stats) return", "statistics_pb2.FeatureNameStatistics: \"\"\"Get feature statistics from the dataset statistics. Args: stats: A DatasetFeatureStatistics protocol", "a DatasetListView. Args: input_path_prefix: If passed, loads files starting with this prefix and", "arrow_type)) value_type = arrow_util.get_innermost_nested_type(arrow_type) if pa.types.is_integer(value_type): return statistics_pb2.FeatureNameStatistics.INT elif pa.types.is_floating(value_type): return statistics_pb2.FeatureNameStatistics.FLOAT elif", "features. Matches validation_derived_source.source_path. Returns: FeatureView of derived feature. Raises: ValueError if multiple derived", "the feature statistics proto. Returns: The custom statistic. Raises: TypeError: If the input", "the underlying proto.\"\"\" return self._statistics def load_sharded_statistics( input_path_prefix: Optional[str] = None, input_paths: Optional[Iterable[str]]", "support future extension of the proto. \"\"\" def __init__(self, stats_proto: statistics_pb2.FeatureNameStatistics): self._statistics =", "None return decoded_value def get_feature_type( dtype: np.dtype) -> Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature type from", "Returns: DatasetFeatureStatistics proto containing the custom statistics for each feature in the dataset.", "stats: A DatasetFeatureStatistics protocol buffer. feature_path: The path of the feature whose statistics", "str): return types.FeaturePath([name_or_path_or_steps]) if isinstance(name_or_path_or_steps, types.FeaturePath): return name_or_path_or_steps return types.FeaturePath(name_or_path_or_steps) class DatasetListView(object): \"\"\"View", "DatasetFeatureStatisticsList proto stored in text format. 
Args: input_path: File path from which to", "TypeError('Expected feature column to be a ' '(Large)List<primitive|struct> or null, but feature {}", "index = self._cross_feature_map.get(feature_id, None) if index is None: return None return CrossFeatureView(self._statistics.cross_features[index]) def", "ANY KIND, either express or implied. # See the License for the specific", "If the input proto is not of the expected type. \"\"\" if not", "types.FeaturePath) -> statistics_pb2.FeatureNameStatistics: \"\"\"Creates the FeatureNameStatistics proto for one feature. Args: stats_values: A", "= True def proto(self) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def", "of the custom statistic and values denoting the value of the custom statistic", "%s' % name) return result # TODO(b/202910677): Add convenience methods for retrieving first-party", "\"\"\"Retrieve numeric statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'num_stats': return self._statistics.num_stats return None", "io_provider: Optional[statistics_io_impl.StatisticsIOProvider] = None ) -> DatasetListView: \"\"\"Read a sharded DatasetFeatureStatisticsList from disk", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "self._init_index() feature_id = (x_path, y_path) index = self._cross_feature_map.get(feature_id, None) if index is None:", "= io_provider.glob(input_path_prefix) acc = statistics.DatasetListAccumulator() stats_iter = io_provider.record_iterator_impl(input_paths) for stats_list in stats_iter: for", "or null, but feature {} ' 'was {}.'.format(feature_path, arrow_type)) value_type = arrow_util.get_innermost_nested_type(arrow_type) if", "= feature.WhichOneof('field_id') elif feature.WhichOneof('field_id') != field_identifier: raise ValueError( 'Features must be specified with", "if index is None: return None return CrossFeatureView(self._statistics.cross_features[index]) def list_features(self) -> Iterable[types.FeaturePath]: \"\"\"Lists", "A FeatureNameStatistics protocol buffer. custom_stats_name: The name of the custom statistics to obtain", "be used in place of proto access (for example, x.numeric_statistics() instead of x.proto().num_stats)", "field_identifier == 'name': feature_id = types.FeaturePath([feature.name]) else: feature_id = types.FeaturePath.from_proto(feature.path) if feature_id in", "custom_stats for name %s' % name) return result # TODO(b/202910677): Add convenience methods", "is None: field_identifier = feature.WhichOneof('field_id') elif feature.WhichOneof('field_id') != field_identifier: raise ValueError( 'Features must", "ValueError('Must provide one of input_paths_prefix, input_paths.') if io_provider is None: io_provider = statistics_io_impl.get_io_provider()", "result.features.add() new_feature_stats_proto.CopyFrom(feature_stats_proto) return result def _make_feature_stats_proto( stats_values: Dict[Text, float], feature_path: types.FeaturePath) -> statistics_pb2.FeatureNameStatistics:", "v return self._slice_map.get(constants.DEFAULT_SLICE_KEY, None) def list_slices(self) -> Iterable[str]: self._init_index() return self._slice_map.keys() class DatasetView(object):", "= (types.FeaturePath.from_proto(cross_feature.path_x), 
types.FeaturePath.from_proto(cross_feature.path_y)) if feature_id in self._cross_feature_map: raise ValueError('Duplicate feature %s' % feature_id)", "to the underlying proto. Where possible, accessors should be used in place of", "future extension of the proto. \"\"\" def __init__(self, stats_proto: statistics_pb2.FeatureNameStatistics): self._statistics = stats_proto", "a ' 'DatasetFeatureStatisticsList proto.' % type(stats).__name__) stats_proto_text = text_format.MessageToString(stats) io_util.write_string_to_file(output_path, stats_proto_text) def load_stats_text(", "byte statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'bytes_stats': return self._statistics.bytes_stats return None def", "ValueError( 'Features must be specified with either path or name within a' '", "If passed, loads files starting with this prefix and ending with a pattern", "Optional[statistics_pb2.StringStatistics]: \"\"\"Retrieve string statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'string_stats': return self._statistics.string_stats return", "stored in text format. Args: input_path: File path from which to load the", "%s' % feature_id) self._cross_feature_map[feature_id] = j self._initialized = True def proto(self) -> statistics_pb2.DatasetFeatureStatistics:", "tensorflow as tf from tensorflow_data_validation import constants from tensorflow_data_validation import types from tensorflow_data_validation.arrow", "one-record TFRecord file or a plain file containing the statistics proto in Proto", "Dict[types.FeaturePath, int] self._cross_feature_map = { } # type: Dict[Tuple[types.FeaturePath, types.FeaturePath], int] self._statistics =", "proto stored in text format. Args: input_path: File path from which to load", "cannot be decoded. 
\"\"\" try: decoded_value = value.decode('utf-8') except UnicodeError: return None return", "feature_path == types.FeaturePath.from_proto(feature_stats.path): return feature_stats raise ValueError('Feature %s not found in the dataset", "= self._statistics.WhichOneof('stats') if which == 'num_stats': return self._statistics.num_stats.common_stats if which == 'string_stats': return", "_init_index(self): \"\"\"Initializes internal mappings.\"\"\" # Lazily initialize in case we don't need an", "to decode. Returns: The value decoded as utf-8, or None, if the value", "consisting of path steps, or a str, which is converted to a length", "_, v in self._slice_map.items(): return v return self._slice_map.get(constants.DEFAULT_SLICE_KEY, None) def list_slices(self) -> Iterable[str]:", "type from Arrow type. Args: feature_path: path of the feature. arrow_type: Arrow DataType.", "the provided io_provider. input_paths: A list of file paths of files containing sharded", "statistics proto from file. Args: input_path: Data statistics file path. The file should", "Text, statistics_pb2.Histogram, statistics_pb2.RankHistogram]: \"\"\"Get custom statistics from the feature statistics. Args: feature_stats: A", "types.FeaturePath): return name_or_path_or_steps return types.FeaturePath(name_or_path_or_steps) class DatasetListView(object): \"\"\"View of statistics for multiple datasets", "self._feature_map.keys() def list_cross_features( self) -> Iterable[Tuple[types.FeaturePath, types.FeaturePath]]: \"\"\"Lists cross-feature identifiers.\"\"\" self._init_index() return self._cross_feature_map.keys()", "the custom statistics to obtain from the feature statistics proto. Returns: The custom", "feature_path: types.FeaturePath, arrow_type: pa.DataType) -> Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature type from Arrow type. 
Args:", "\"\"\" stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_text = io_util.read_file_to_string(input_path) text_format.Parse(stats_text, stats_proto) return stats_proto def load_stats_binary(", "raise ValueError('load_stats_tfrecord expects a single record.') except StopIteration: return result except Exception as", "which = self._statistics.WhichOneof('stats') if which == 'num_stats': return self._statistics.num_stats.common_stats if which == 'string_stats':", "input_paths_prefix, input_paths.') if io_provider is None: io_provider = statistics_io_impl.get_io_provider() if input_path_prefix is not", "Optional[statistics_pb2.StructStatistics]: \"\"\"Retrieve struct statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'struct_stats': return self._statistics.struct_stats return", "feature_id: A types.FeaturePath, Iterable[str] consisting of path steps, or a str, which is", "str, source_paths: Sequence[types.FeaturePath]) -> Optional['FeatureView']: \"\"\"Retrieve a derived feature based on a deriver", "write_stats_text(stats: statistics_pb2.DatasetFeatureStatisticsList, output_path: Text) -> None: \"\"\"Writes a DatasetFeatureStatisticsList proto to a file", "-> Optional['FeatureView']: \"\"\"Retrieve a derived feature based on a deriver name and its", "from numpy dtype. Args: dtype: Numpy dtype. Returns: A statistics_pb2.FeatureNameStatistics.Type value. \"\"\" return", "# limitations under the License. \"\"\"Utilities for stats generators.\"\"\" from __future__ import absolute_import", "custom statistic and values denoting the value of the custom statistic for the", "output of the provided io_provider. input_paths: A list of file paths of files", "input dict. 
Args: stats_values: A Dict[FeaturePath, Dict[str,float]] where the keys are feature paths,", "self._init_index() index = self._feature_map.get(feature_id, None) if index is None: return None return FeatureView(self._statistics.features[index])", "None) def list_slices(self) -> Iterable[str]: self._init_index() return self._slice_map.keys() class DatasetView(object): \"\"\"View of statistics", "return None return decoded_value def get_feature_type( dtype: np.dtype) -> Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature type", "name and its inputs. Args: deriver_name: The name of a deriver. Matches validation_derived_source", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "file should be a one-record TFRecord file or a plain file containing the", "proto. Returns: The custom statistic. Raises: TypeError: If the input feature statistics is", "alphabetically by feature name to have deterministic ordering feature_paths = sorted(stats_values.keys()) for feature_path", "path. The file should be a one-record TFRecord file or a plain file", "statistics proto does not have the specified slice statistics. \"\"\" for slice_stats in", "statistics_pb2.DatasetFeatureStatisticsList, output_path: Text) -> None: \"\"\"Writes a DatasetFeatureStatisticsList proto to a file in", "custom statistics to obtain from the feature statistics proto. Returns: The custom statistic.", "length 1 path, and can be referred to as such. Args: feature_id: A", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "self._statistics def custom_statistic(self, name: str) -> Optional[statistics_pb2.CustomStatistic]: \"\"\"Retrieve a custom_statistic by name.\"\"\" result", "import division from __future__ import print_function import logging from typing import Dict, Iterable,", "Optional, Sequence, Text, Tuple, Union import numpy as np import pyarrow as pa", "is not of the expected type. 
ValueError: If the custom statistic is not", "raise ValueError('Duplicate custom_stats for name %s' % name) return result # TODO(b/202910677): Add", "stats_proto_text = text_format.MessageToString(stats) io_util.write_string_to_file(output_path, stats_proto_text) def load_stats_text( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads the", "proto. \"\"\" stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_text = io_util.read_file_to_string(input_path) text_format.Parse(stats_text, stats_proto) return stats_proto def", "indexing if performance becomes an issue. results = [] for feature in self.proto().features:", "applicable law or agreed to in writing, software # distributed under the License", "in self._slice_map.items(): return v return self._slice_map.get(constants.DEFAULT_SLICE_KEY, None) def list_slices(self) -> Iterable[str]: self._init_index() return", "== 'num_stats': return self._statistics.num_stats return None def string_statistics(self) -> Optional[statistics_pb2.StringStatistics]: \"\"\"Retrieve string statistics", "statistics. \"\"\" if not isinstance(stats, statistics_pb2.DatasetFeatureStatistics): raise TypeError('statistics is of type %s, should", "for now). Raises: TypeError: if the type is not supported. \"\"\" if pa.types.is_null(arrow_type):", "types.FeaturePath, Iterable[str]], y_path: Union[str, types.FeaturePath, Iterable[str]] ) -> Optional['CrossFeatureView']: \"\"\"Retrieve a cross-feature if", "-> Optional[statistics_pb2.StructStatistics]: \"\"\"Retrieve struct statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'struct_stats': return self._statistics.struct_stats", "Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Get statistics associated with a specific slice. 
Args: stats: A", "Dataset.') if field_identifier == 'name': feature_id = types.FeaturePath([feature.name]) else: feature_id = types.FeaturePath.from_proto(feature.path) if", "as access to the underlying proto. Where possible, accessors should be used in", "\"\"\"Loads a serialized DatasetFeatureStatisticsList proto from a file. Args: input_path: File path from", "# type: Dict[Tuple[types.FeaturePath, types.FeaturePath], int] self._statistics = stats_proto self._initialized = False def _init_index(self):", "Ex. { 'Mutual Information': 0.5, 'Correlation': 0.1 } feature_path: The path of the", "results.append(FeatureView(feature)) if len(results) > 1: raise ValueError('Ambiguous result, %d features matched' % len(results))", "not of the expected type. ValueError: If the input feature is not found", "return feature_stats raise ValueError('Feature %s not found in the dataset statistics.' % feature_path)", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "The bytes value to decode. Returns: The value decoded as utf-8, or None,", "elif pa.types.is_floating(value_type): return statistics_pb2.FeatureNameStatistics.FLOAT elif arrow_util.is_binary_like(value_type): return statistics_pb2.FeatureNameStatistics.STRING elif pa.types.is_struct(value_type): return statistics_pb2.FeatureNameStatistics.STRUCT elif", "path does not exist. \"\"\" if not tf.io.gfile.exists(input_path): raise IOError('Invalid input path {}.'.format(input_path))", "the feature. Ex. 
{ FeaturePath(('feature_1',)): { 'Mutual Information': 0.5, 'Correlation': 0.1 }, FeaturePath(('feature_2',)):", "{} # type: Dict[types.FeaturePath, int] self._cross_feature_map = { } # type: Dict[Tuple[types.FeaturePath, types.FeaturePath],", "= None for stat in self._statistics.custom_stats: if stat.name == name: if result is", "= statistics_pb2.DatasetFeatureStatistics() # Sort alphabetically by feature name to have deterministic ordering feature_paths", "' Dataset.') if field_identifier == 'name': feature_id = types.FeaturePath([feature.name]) else: feature_id = types.FeaturePath.from_proto(feature.path)", "result.path.CopyFrom(feature_path.to_proto()) # Sort alphabetically by statistic name to have deterministic ordering stat_names =", "writing, software # distributed under the License is distributed on an \"AS IS\"", "result is None: result = stat else: raise ValueError('Duplicate custom_stats for name %s'", "stats_proto.ParseFromString(io_util.read_file_to_string( input_path, binary_mode=True)) return stats_proto def load_stats_tfrecord( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data", "-> Union[float, Text, statistics_pb2.Histogram, statistics_pb2.RankHistogram]: \"\"\"Get custom statistics from the feature statistics. Args:", "The name of a deriver. Matches validation_derived_source deriver_name. source_paths: Source paths for derived", "isinstance(stats, statistics_pb2.DatasetFeatureStatisticsList): raise TypeError( 'stats is of type %s, should be a '", "custom statistic is not found in the feature statistics. \"\"\" if not isinstance(feature_stats,", "raise ValueError('Duplicate slice name %s' % dataset.name) self._slice_map[dataset.name] = DatasetView(dataset) self._initialized = True", "underlying proto. 
Where possible, accessors should be used in place of proto access", "None return CrossFeatureView(self._statistics.cross_features[index]) def list_features(self) -> Iterable[types.FeaturePath]: \"\"\"Lists feature identifiers.\"\"\" self._init_index() return self._feature_map.keys()", "Dict[str,float]] where the keys are feature paths, and values are Dicts with keys", "is not supported. \"\"\" if pa.types.is_null(arrow_type): return None if not arrow_util.is_list_like(arrow_type): raise TypeError('Expected", "deriver_name: str, source_paths: Sequence[types.FeaturePath]) -> Optional['FeatureView']: \"\"\"Retrieve a derived feature based on a", "Optional[statistics_pb2.BytesStatistics]: \"\"\"Retrieve byte statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'bytes_stats': return self._statistics.bytes_stats return", "statistics_pb2.FeatureNameStatistics.INT elif pa.types.is_floating(value_type): return statistics_pb2.FeatureNameStatistics.FLOAT elif arrow_util.is_binary_like(value_type): return statistics_pb2.FeatureNameStatistics.STRING elif pa.types.is_struct(value_type): return statistics_pb2.FeatureNameStatistics.STRUCT", "tensorflow_data_validation.utils import statistics_io_impl from tensorflow_data_validation.utils import io_util from tfx_bsl import statistics from google.protobuf", "compliance with the License. # You may obtain a copy of the License", "= statistics_pb2.DatasetFeatureStatisticsList() stats_proto.ParseFromString(io_util.read_file_to_string( input_path, binary_mode=True)) return stats_proto def load_stats_tfrecord( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList:", "-> Optional[statistics_pb2.NumericStatistics]: \"\"\"Retrieve numeric statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'num_stats': return self._statistics.num_stats", "be a ' 'DatasetFeatureStatisticsList proto.' 
% type(stats).__name__) stats_proto_text = text_format.MessageToString(stats) io_util.write_string_to_file(output_path, stats_proto_text) def", "self._statistics = stats_proto def proto(self) -> statistics_pb2.CrossFeatureStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics", "with keys denoting name of the custom statistic and values denoting the value", "Dict[Text, float], feature_path: types.FeaturePath) -> statistics_pb2.FeatureNameStatistics: \"\"\"Creates the FeatureNameStatistics proto for one feature.", "-> Optional['FeatureView']: \"\"\"Retrieve a feature if it exists. Features specified within the underlying", "text_format from tensorflow_metadata.proto.v0 import statistics_pb2 _NP_DTYPE_KIND_TO_FEATURE_TYPE = { 'f': statistics_pb2.FeatureNameStatistics.FLOAT, 'i': statistics_pb2.FeatureNameStatistics.INT, 'u':", "input proto is not of the expected type. \"\"\" if not isinstance(stats, statistics_pb2.DatasetFeatureStatisticsList):", "from tensorflow_data_validation import types from tensorflow_data_validation.arrow import arrow_util from tensorflow_data_validation.utils import statistics_io_impl from", "} # LINT.IfChange # Semantic domain information can be passed to schema inference", "result def _make_feature_stats_proto( stats_values: Dict[Text, float], feature_path: types.FeaturePath) -> statistics_pb2.FeatureNameStatistics: \"\"\"Creates the FeatureNameStatistics", "types.FeaturePath.from_proto(feature_stats.path): return feature_stats raise ValueError('Feature %s not found in the dataset statistics.' %", "the input statistics proto does not have the specified slice statistics. \"\"\" for", "match. \"\"\" # TODO(b/221453427): Consider indexing if performance becomes an issue. results =", "return FeatureView(self._statistics.features[index]) def get_cross_feature( self, x_path: Union[str, types.FeaturePath, Iterable[str]], y_path: Union[str, types.FeaturePath, Iterable[str]]", "if the type is not supported. 
\"\"\" if pa.types.is_null(arrow_type): return None if not", "None if it cannot be decoded. Args: value: The bytes value to decode.", "self, deriver_name: str, source_paths: Sequence[types.FeaturePath]) -> Optional['FeatureView']: \"\"\"Retrieve a derived feature based on", "\"\"\"View of a single cross feature.\"\"\" def __init__(self, stats_proto: statistics_pb2.CrossFeatureStatistics): self._statistics = stats_proto", "in stats.features: if feature_path == types.FeaturePath.from_proto(feature_stats.path): return feature_stats raise ValueError('Feature %s not found", "if self._statistics.WhichOneof('stats') == 'struct_stats': return self._statistics.struct_stats return None def common_statistics(self) -> Optional[statistics_pb2.CommonStatistics]: \"\"\"Retrieve", "return self._statistics def get_feature( self, feature_id: Union[str, types.FeaturePath, Iterable[str]] ) -> Optional['FeatureView']: \"\"\"Retrieve", "custom # statistics (e.g., MI, NLP). def numeric_statistics(self) -> Optional[statistics_pb2.NumericStatistics]: \"\"\"Retrieve numeric statistics", "if field_identifier is None: field_identifier = feature.WhichOneof('field_id') elif feature.WhichOneof('field_id') != field_identifier: raise ValueError(", "is None: continue if feature.validation_derived_source.deriver_name != deriver_name: continue if (len(source_paths) != len( feature.validation_derived_source.source_path)):", "becomes an issue. results = [] for feature in self.proto().features: if feature.validation_derived_source is", "try: next(it) raise ValueError('load_stats_tfrecord expects a single record.') except StopIteration: return result except", "== 1: for _, v in self._slice_map.items(): return v return self._slice_map.get(constants.DEFAULT_SLICE_KEY, None) def", "for stats generators.\"\"\" from __future__ import absolute_import from __future__ import division from __future__", "str, which is converted to a length one path. 
Returns: A FeatureView, or", "it = statistics_io_impl.get_io_provider('tfrecords').record_iterator_impl( [input_path]) result = next(it) try: next(it) raise ValueError('load_stats_tfrecord expects a", "return None def bytes_statistics(self) -> Optional[statistics_pb2.BytesStatistics]: \"\"\"Retrieve byte statistics if available.\"\"\" if self._statistics.WhichOneof('stats')", "def string_statistics(self) -> Optional[statistics_pb2.StringStatistics]: \"\"\"Retrieve string statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'string_stats':", "ValueError if multiple derived features match. \"\"\" # TODO(b/221453427): Consider indexing if performance", "CrossFeatureView(object): \"\"\"View of a single cross feature.\"\"\" def __init__(self, stats_proto: statistics_pb2.CrossFeatureStatistics): self._statistics =", "not isinstance(stats, statistics_pb2.DatasetFeatureStatisticsList): raise TypeError( 'stats is of type %s, should be a", "Tuple, Union import numpy as np import pyarrow as pa import tensorflow as", "return self._statistics.bytes_stats return None def struct_statistics(self) -> Optional[statistics_pb2.StructStatistics]: \"\"\"Retrieve struct statistics if available.\"\"\"", "custom_stats.WhichOneof('val')) raise ValueError('Custom statistics %s not found in the feature statistics.' % custom_stats_name)", "mappings.\"\"\" # Lazily initialize in case we don't need an index. 
if self._initialized:", "(the \"License\"); # you may not use this file except in compliance with", "Dict[Tuple[types.FeaturePath, types.FeaturePath], int] self._statistics = stats_proto self._initialized = False def _init_index(self): \"\"\"Initializes internal", "feature.validation_derived_source.source_path)): continue all_match = True for i in range(len(source_paths)): if (source_paths[i] != types.FeaturePath.from_proto(", "statistics_pb2.FeatureNameStatistics.FLOAT elif arrow_util.is_binary_like(value_type): return statistics_pb2.FeatureNameStatistics.STRING elif pa.types.is_struct(value_type): return statistics_pb2.FeatureNameStatistics.STRUCT elif pa.types.is_null(value_type): return None", "proto. Returns: A DatasetFeatureStatisticsList proto. \"\"\" stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_proto.ParseFromString(io_util.read_file_to_string( input_path, binary_mode=True)) return", "be decoded. \"\"\" try: decoded_value = value.decode('utf-8') except UnicodeError: return None return decoded_value", "stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_proto.ParseFromString(io_util.read_file_to_string( input_path, binary_mode=True)) return stats_proto def load_stats_tfrecord( input_path: Text) ->", "= value.decode('utf-8') except UnicodeError: return None return decoded_value def get_feature_type( dtype: np.dtype) ->", "raise ValueError( 'Features must be specified with either path or name within a'", "A DatasetFeatureStatisticsList proto. \"\"\" stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_proto.ParseFromString(io_util.read_file_to_string( input_path, binary_mode=True)) return stats_proto def", "# Unless required by applicable law or agreed to in writing, software #", "the dataset statistics.' 
% feature_path) def get_custom_stats( feature_stats: statistics_pb2.FeatureNameStatistics, custom_stats_name: Text ) ->", "print_function import logging from typing import Dict, Iterable, Optional, Sequence, Text, Tuple, Union", "by applicable law or agreed to in writing, software # distributed under the", "\"\"\"Loads data statistics proto from TFRecord file. Args: input_path: Data statistics file path.", "results.pop() return None class FeatureView(object): \"\"\"View of a single feature. This class provides", "slice_key: str) -> Optional['DatasetView']: self._init_index() return self._slice_map.get(slice_key, None) def get_default_slice(self) -> Optional['DatasetView']: self._init_index()", "slice. Args: stats: A DatasetFeatureStatisticsList protocol buffer. slice_key: Slice key of the slice.", "feature. Returns: A FeatureNameStatistic proto containing the custom statistics for a feature. \"\"\"", "keys are feature paths, and values are Dicts with keys denoting name of", "which == 'num_stats': return self._statistics.num_stats.common_stats if which == 'string_stats': return self._statistics.string_stats.common_stats if which", "= [] for feature in self.proto().features: if feature.validation_derived_source is None: continue if feature.validation_derived_source.deriver_name", "dtype. Args: dtype: Numpy dtype. Returns: A statistics_pb2.FeatureNameStatistics.Type value. \"\"\" return _NP_DTYPE_KIND_TO_FEATURE_TYPE.get(dtype.kind) def", "in range(len(source_paths)): if (source_paths[i] != types.FeaturePath.from_proto( feature.validation_derived_source.source_path[i])): all_match = False break if all_match:", "If the custom statistic is not found in the feature statistics. 
\"\"\" if", "the dict is the name of the custom statistic and the value is", "self._statistics.struct_stats return None def common_statistics(self) -> Optional[statistics_pb2.CommonStatistics]: \"\"\"Retrieve common statistics if available.\"\"\" which", "list_slices(self) -> Iterable[str]: self._init_index() return self._slice_map.keys() class DatasetView(object): \"\"\"View of statistics for a", "file except in compliance with the License. # You may obtain a copy", "index. if self._initialized: return for dataset in self._statistics.datasets: if dataset.name in self._slice_map: raise", "if len(results) == 1: return results.pop() return None class FeatureView(object): \"\"\"View of a", "-> statistics_pb2.FeatureNameStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def custom_statistic(self, name: str) ->", "is converted to a length one path. Returns: A FeatureView, or None if", "ValueError('load_stats_tfrecord expects a single record.') except StopIteration: return result except Exception as e:", "float]] ) -> statistics_pb2.DatasetFeatureStatistics: \"\"\"Builds DatasetFeatureStatistics proto with custom stats from input dict.", "{ 'f': statistics_pb2.FeatureNameStatistics.FLOAT, 'i': statistics_pb2.FeatureNameStatistics.INT, 'u': statistics_pb2.FeatureNameStatistics.INT, 'S': statistics_pb2.FeatureNameStatistics.STRING, 'O': statistics_pb2.FeatureNameStatistics.STRING, 'U': statistics_pb2.FeatureNameStatistics.STRING,", "(len(source_paths) != len( feature.validation_derived_source.source_path)): continue all_match = True for i in range(len(source_paths)): if", "possible, accessors should be used in place of proto access (for example, x.numeric_statistics()", "or None.\"\"\" x_path = _normalize_feature_id(x_path) y_path = _normalize_feature_id(y_path) self._init_index() feature_id = (x_path, y_path)", "buffer. 
feature_path: The path of the feature whose statistics to obtain from the", "import io_util from tfx_bsl import statistics from google.protobuf import text_format from tensorflow_metadata.proto.v0 import", "TypeError('statistics is of type %s, should be a ' 'DatasetFeatureStatistics proto.' % type(stats).__name__)", "obtain from the feature statistics proto. Returns: The custom statistic. Raises: TypeError: If", "arrow type: {}'.format( feature_path, arrow_type)) def make_dataset_feature_stats_proto( stats_values: Dict[types.FeaturePath, Dict[Text, float]] ) ->", "from tensorflow_data_validation import constants from tensorflow_data_validation import types from tensorflow_data_validation.arrow import arrow_util from", "is the numeric value of the custom statistic of that feature. Ex. {", "the underlying proto. Where possible, accessors should be used in place of proto", "statistics. \"\"\" for slice_stats in stats.datasets: if slice_stats.name == slice_key: result = statistics_pb2.DatasetFeatureStatisticsList()", "None: return None return FeatureView(self._statistics.features[index]) def get_cross_feature( self, x_path: Union[str, types.FeaturePath, Iterable[str]], y_path:", "the feature statistics. Args: feature_stats: A FeatureNameStatistics protocol buffer. custom_stats_name: The name of", "path from which to load the DatasetFeatureStatisticsList proto. Returns: A DatasetFeatureStatisticsList proto. \"\"\"", "type. \"\"\" if not isinstance(stats, statistics_pb2.DatasetFeatureStatisticsList): raise TypeError( 'stats is of type %s,", "for one feature. Args: stats_values: A Dict[str,float] where the key of the dict", "Args: feature_id: A types.FeaturePath, Iterable[str] consisting of path steps, or a str, which", "decoded. Args: value: The bytes value to decode. Returns: The value decoded as", "MI, NLP). 
def numeric_statistics(self) -> Optional[statistics_pb2.NumericStatistics]: \"\"\"Retrieve numeric statistics if available.\"\"\" if self._statistics.WhichOneof('stats')", "\"\"\"Retrieve a feature if it exists. Features specified within the underlying proto by", "Dict[Text, float]] ) -> statistics_pb2.DatasetFeatureStatistics: \"\"\"Builds DatasetFeatureStatistics proto with custom stats from input", "stats_text = io_util.read_file_to_string(input_path) text_format.Parse(stats_text, stats_proto) return stats_proto def load_stats_binary( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList:", "can be referred to as such. Args: feature_id: A types.FeaturePath, Iterable[str] consisting of", "custom statistic for the feature. Ex. { FeaturePath(('feature_1',)): { 'Mutual Information': 0.5, 'Correlation':", "_make_feature_stats_proto( stats_values: Dict[Text, float], feature_path: types.FeaturePath) -> statistics_pb2.FeatureNameStatistics: \"\"\"Creates the FeatureNameStatistics proto for", "paths for derived features. Matches validation_derived_source.source_path. Returns: FeatureView of derived feature. 
Raises: ValueError", "(source_paths[i] != types.FeaturePath.from_proto( feature.validation_derived_source.source_path[i])): all_match = False break if all_match: results.append(FeatureView(feature)) if len(results)", "for j, cross_feature in enumerate(self._statistics.cross_features): feature_id = (types.FeaturePath.from_proto(cross_feature.path_x), types.FeaturePath.from_proto(cross_feature.path_y)) if feature_id in self._cross_feature_map:", "Optional[statistics_pb2.CommonStatistics]: \"\"\"Retrieve common statistics if available.\"\"\" which = self._statistics.WhichOneof('stats') if which == 'num_stats':", "input_path_prefix is not None: input_paths = io_provider.glob(input_path_prefix) acc = statistics.DatasetListAccumulator() stats_iter = io_provider.record_iterator_impl(input_paths)", "_normalize_feature_id(y_path) self._init_index() feature_id = (x_path, y_path) index = self._cross_feature_map.get(feature_id, None) if index is", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "1: return results.pop() return None class FeatureView(object): \"\"\"View of a single feature. This", "proto access (for example, x.numeric_statistics() instead of x.proto().num_stats) in order to support future", "from the feature statistics. Args: feature_stats: A FeatureNameStatistics protocol buffer. custom_stats_name: The name", "(e.g., MI, NLP). def numeric_statistics(self) -> Optional[statistics_pb2.NumericStatistics]: \"\"\"Retrieve numeric statistics if available.\"\"\" if", "the FeatureNameStatistics proto for one feature. 
Args: stats_values: A Dict[str,float] where the key", "feature_path in feature_paths: feature_stats_proto = _make_feature_stats_proto(stats_values[feature_path], feature_path) new_feature_stats_proto = result.features.add() new_feature_stats_proto.CopyFrom(feature_stats_proto) return result", "} # type: Dict[Tuple[types.FeaturePath, types.FeaturePath], int] self._statistics = stats_proto self._initialized = False def", "return result except Exception as e: raise e def get_feature_stats(stats: statistics_pb2.DatasetFeatureStatistics, feature_path: types.FeaturePath", "path of the feature. arrow_type: Arrow DataType. Returns: A statistics_pb2.FeatureNameStatistics.Type value or None", "raise ValueError('Custom statistics %s not found in the feature statistics.' % custom_stats_name) def", "the specific slice. Raises: ValueError: If the input statistics proto does not have", "self._initialized: return for dataset in self._statistics.datasets: if dataset.name in self._slice_map: raise ValueError('Duplicate slice", "decoded_value = value.decode('utf-8') except UnicodeError: return None return decoded_value def get_feature_type( dtype: np.dtype)", "slice_key: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Get statistics associated with a specific slice. Args: stats:", "the input feature is not found in the dataset statistics. \"\"\" if not", "if pa.types.is_null(arrow_type): return None if not arrow_util.is_list_like(arrow_type): raise TypeError('Expected feature column to be", "Dict[FeaturePath, Dict[str,float]] where the keys are feature paths, and values are Dicts with", "if field_identifier == 'name': feature_id = types.FeaturePath([feature.name]) else: feature_id = types.FeaturePath.from_proto(feature.path) if feature_id", "input_path_prefix is None == input_paths is None: raise ValueError('Must provide one of input_paths_prefix,", "associated with a specific slice. Args: stats: A DatasetFeatureStatisticsList protocol buffer. 
slice_key: Slice", "\"\"\"Get feature statistics from the dataset statistics. Args: stats: A DatasetFeatureStatistics protocol buffer.", "x_path: Union[str, types.FeaturePath, Iterable[str]], y_path: Union[str, types.FeaturePath, Iterable[str]] ) -> Optional['CrossFeatureView']: \"\"\"Retrieve a", "tensorflow_metadata.proto.v0 import statistics_pb2 _NP_DTYPE_KIND_TO_FEATURE_TYPE = { 'f': statistics_pb2.FeatureNameStatistics.FLOAT, 'i': statistics_pb2.FeatureNameStatistics.INT, 'u': statistics_pb2.FeatureNameStatistics.INT, 'S':", "not exist. \"\"\" if not tf.io.gfile.exists(input_path): raise IOError('Invalid input path {}.'.format(input_path)) try: return", "files containing sharded DatasetFeatureStatisticsList protos. io_provider: Optional StatisticsIOProvider. If unset, a default will", "if which == 'struct_stats': return self._statistics.struct_stats.common_stats return None class CrossFeatureView(object): \"\"\"View of a", "def _init_index(self): \"\"\"Initializes internal indices. Noop if already initialized.\"\"\" if self._initialized: return field_identifier", "Google LLC # # Licensed under the Apache License, Version 2.0 (the \"License\");", "which is converted to a length one path. Returns: A FeatureView, or None", "def bytes_statistics(self) -> Optional[statistics_pb2.BytesStatistics]: \"\"\"Retrieve byte statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'bytes_stats':", "self._statistics.bytes_stats.common_stats if which == 'struct_stats': return self._statistics.struct_stats.common_stats return None class CrossFeatureView(object): \"\"\"View of", "buffer. slice_key: Slice key of the slice. Returns: Statistics of the specific slice.", "DatasetView] self._initialized = False def _init_index(self): \"\"\"Initializes internal mappings.\"\"\" # Lazily initialize in", "input_paths: A list of file paths of files containing sharded DatasetFeatureStatisticsList protos. 
io_provider:", "to be a ' '(Large)List<primitive|struct> or null, but feature {} ' 'was {}.'.format(feature_path,", "of the custom statistic and the value is the numeric value of the", "def make_dataset_feature_stats_proto( stats_values: Dict[types.FeaturePath, Dict[Text, float]] ) -> statistics_pb2.DatasetFeatureStatistics: \"\"\"Builds DatasetFeatureStatistics proto with", "DatasetListView: \"\"\"Read a sharded DatasetFeatureStatisticsList from disk as a DatasetListView. Args: input_path_prefix: If", "in self.proto().features: if feature.validation_derived_source is None: continue if feature.validation_derived_source.deriver_name != deriver_name: continue if", "statistic and values denoting the value of the custom statistic for the feature.", "statistics from the dataset statistics. Args: stats: A DatasetFeatureStatistics protocol buffer. feature_path: The", "containing the merged proto. \"\"\" if input_path_prefix is None == input_paths is None:", "name of the custom statistic and values denoting the value of the custom", "if already initialized.\"\"\" if self._initialized: return field_identifier = None for j, feature in", "\"\"\" if not tf.io.gfile.exists(input_path): raise IOError('Invalid input path {}.'.format(input_path)) try: return load_stats_tfrecord(input_path) except", "ending with a pattern corresponding to the output of the provided io_provider. input_paths:", "from Arrow type. Args: feature_path: path of the feature. arrow_type: Arrow DataType. Returns:", "referred to as such. Args: feature_id: A types.FeaturePath, Iterable[str] consisting of path steps,", "DatasetFeatureStatisticsList proto from a file. Args: input_path: File path from which to load", "of the custom statistic of that feature. Ex. 
{ 'Mutual Information': 0.5, 'Correlation':", "feature.WhichOneof('field_id') elif feature.WhichOneof('field_id') != field_identifier: raise ValueError( 'Features must be specified with either", "import logging from typing import Dict, Iterable, Optional, Sequence, Text, Tuple, Union import", "0.5, 'Correlation': 0.1 }, FeaturePath(('feature_2',)): { 'Mutual Information': 0.8, 'Correlation': 0.6 } }", "value of the custom statistic of that feature. Ex. { 'Mutual Information': 0.5,", "is of type %s, should be a ' 'DatasetFeatureStatistics proto.' % type(stats).__name__) for", "and ending with a pattern corresponding to the output of the provided io_provider.", "the DatasetFeatureStatisticsList proto. Raises: TypeError: If the input proto is not of the", "{}'.format( feature_path, arrow_type)) def make_dataset_feature_stats_proto( stats_values: Dict[types.FeaturePath, Dict[Text, float]] ) -> statistics_pb2.DatasetFeatureStatistics: \"\"\"Builds", "types.FeaturePath], int] self._statistics = stats_proto self._initialized = False def _init_index(self): \"\"\"Initializes internal indices.", "{ 'Mutual Information': 0.8, 'Correlation': 0.6 } } Returns: DatasetFeatureStatistics proto containing the", "cannot be determined for now). Raises: TypeError: if the type is not supported.", "custom statistic of that feature. Ex. { 'Mutual Information': 0.5, 'Correlation': 0.1 }", "is None: raise ValueError('Must provide one of input_paths_prefix, input_paths.') if io_provider is None:", "the underlying proto by name (instead of path) are normalized to a length", "a deriver. Matches validation_derived_source deriver_name. source_paths: Source paths for derived features. Matches validation_derived_source.source_path.", "return self._cross_feature_map.keys() def get_derived_feature( self, deriver_name: str, source_paths: Sequence[types.FeaturePath]) -> Optional['FeatureView']: \"\"\"Retrieve a", "the proto. 
\"\"\" def __init__(self, stats_proto: statistics_pb2.FeatureNameStatistics): self._statistics = stats_proto def proto(self) ->", "dataset statistics. \"\"\" if not isinstance(stats, statistics_pb2.DatasetFeatureStatistics): raise TypeError('statistics is of type %s,", "data statistics proto from TFRecord file. Args: input_path: Data statistics file path. Returns:", "available.\"\"\" if self._statistics.WhichOneof('stats') == 'bytes_stats': return self._statistics.bytes_stats return None def struct_statistics(self) -> Optional[statistics_pb2.StructStatistics]:", "input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data statistics proto from TFRecord file. Args: input_path:", "'Correlation': 0.6 } } Returns: DatasetFeatureStatistics proto containing the custom statistics for each", "__init__(self, stats_proto: statistics_pb2.DatasetFeatureStatisticsList): self._statistics = stats_proto self._slice_map = {} # type: Dict[str, DatasetView]", "path or name within a' ' Dataset.') if field_identifier == 'name': feature_id =", "of derived feature. Raises: ValueError if multiple derived features match. \"\"\" # TODO(b/221453427):", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "type from numpy dtype. Args: dtype: Numpy dtype. Returns: A statistics_pb2.FeatureNameStatistics.Type value. \"\"\"", "load_stats_binary( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads a serialized DatasetFeatureStatisticsList proto from a file.", "Exception: # pylint: disable=broad-except logging.info('File %s did not look like a TFRecord. 
Try", "self._statistics.WhichOneof('stats') == 'bytes_stats': return self._statistics.bytes_stats return None def struct_statistics(self) -> Optional[statistics_pb2.StructStatistics]: \"\"\"Retrieve struct", "statistics_pb2.FeatureNameStatistics.INT, 'S': statistics_pb2.FeatureNameStatistics.STRING, 'O': statistics_pb2.FeatureNameStatistics.STRING, 'U': statistics_pb2.FeatureNameStatistics.STRING, } # LINT.IfChange # Semantic domain", "\"\"\" try: decoded_value = value.decode('utf-8') except UnicodeError: return None return decoded_value def get_feature_type(", "def custom_statistic(self, name: str) -> Optional[statistics_pb2.CustomStatistic]: \"\"\"Retrieve a custom_statistic by name.\"\"\" result =", "return getattr(custom_stats, custom_stats.WhichOneof('val')) raise ValueError('Custom statistics %s not found in the feature statistics.'", "types.FeaturePath([name_or_path_or_steps]) if isinstance(name_or_path_or_steps, types.FeaturePath): return name_or_path_or_steps return types.FeaturePath(name_or_path_or_steps) class DatasetListView(object): \"\"\"View of statistics", "feature_id = types.FeaturePath([feature.name]) else: feature_id = types.FeaturePath.from_proto(feature.path) if feature_id in self._feature_map: raise ValueError('Duplicate", "pa.types.is_struct(value_type): return statistics_pb2.FeatureNameStatistics.STRUCT elif pa.types.is_null(value_type): return None raise TypeError('Feature {} has unsupported arrow", "proto. Raises: TypeError: If the input proto is not of the expected type.", "import types from tensorflow_data_validation.arrow import arrow_util from tensorflow_data_validation.utils import statistics_io_impl from tensorflow_data_validation.utils import", "cross-feature if it exists, or None.\"\"\" x_path = _normalize_feature_id(x_path) y_path = _normalize_feature_id(y_path) self._init_index()", "proto.' 
% type(stats).__name__) for feature_stats in stats.features: if feature_path == types.FeaturePath.from_proto(feature_stats.path): return feature_stats", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "proto from TFRecord file. Args: input_path: Data statistics file path. Returns: A DatasetFeatureStatisticsList", "input_path, binary_mode=True)) return stats_proto def load_stats_tfrecord( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data statistics", "deriver_name: The name of a deriver. Matches validation_derived_source deriver_name. source_paths: Source paths for", "e def get_feature_stats(stats: statistics_pb2.DatasetFeatureStatistics, feature_path: types.FeaturePath ) -> statistics_pb2.FeatureNameStatistics: \"\"\"Get feature statistics from", "input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads a serialized DatasetFeatureStatisticsList proto from a file. Args:", "Returns: FeatureView of derived feature. Raises: ValueError if multiple derived features match. \"\"\"", "DatasetListView. Args: input_path_prefix: If passed, loads files starting with this prefix and ending", "the License. \"\"\"Utilities for stats generators.\"\"\" from __future__ import absolute_import from __future__ import", "\"\"\"Retrieve a cross-feature if it exists, or None.\"\"\" x_path = _normalize_feature_id(x_path) y_path =", "\"\"\"View of statistics for a dataset (slice).\"\"\" def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatistics): self._feature_map =", "expected type. 
ValueError: If the input feature is not found in the dataset", "result = next(it) try: next(it) raise ValueError('load_stats_tfrecord expects a single record.') except StopIteration:", "return v return self._slice_map.get(constants.DEFAULT_SLICE_KEY, None) def list_slices(self) -> Iterable[str]: self._init_index() return self._slice_map.keys() class", "raise TypeError('feature_stats is of type %s, should be a ' 'FeatureNameStatistics proto.' %", "the type is not supported. \"\"\" if pa.types.is_null(arrow_type): return None if not arrow_util.is_list_like(arrow_type):", "type(stats).__name__) stats_proto_text = text_format.MessageToString(stats) io_util.write_string_to_file(output_path, stats_proto_text) def load_stats_text( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads", "is not found in the feature statistics. \"\"\" if not isinstance(feature_stats, statistics_pb2.FeatureNameStatistics): raise", "if it exists. Features specified within the underlying proto by name (instead of", "Sequence, Text, Tuple, Union import numpy as np import pyarrow as pa import", "\"\"\" if not isinstance(stats, statistics_pb2.DatasetFeatureStatisticsList): raise TypeError( 'stats is of type %s, should", "if self._statistics.WhichOneof('stats') == 'bytes_stats': return self._statistics.bytes_stats return None def struct_statistics(self) -> Optional[statistics_pb2.StructStatistics]: \"\"\"Retrieve", "\"\"\"Retrieve a derived feature based on a deriver name and its inputs. Args:", "# pylint: disable=broad-except logging.info('File %s did not look like a TFRecord. 
Try reading", "Optional[Text]: \"\"\"Returns the value decoded as utf-8, or None if it cannot be", "tf from tensorflow_data_validation import constants from tensorflow_data_validation import types from tensorflow_data_validation.arrow import arrow_util", "reading as a plain ' 'file.', input_path) return load_stats_text(input_path) def _normalize_feature_id( name_or_path_or_steps: Union[str,", "FeatureView(object): \"\"\"View of a single feature. This class provides accessor methods, as well", "ValueError: If the input feature is not found in the dataset statistics. \"\"\"", "TypeError: If the input proto is not of the expected type. \"\"\" if", "import pyarrow as pa import tensorflow as tf from tensorflow_data_validation import constants from", "in text format. Args: input_path: File path from which to load the DatasetFeatureStatisticsList", "statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'num_stats': return self._statistics.num_stats return None def string_statistics(self)", "if index is None: return None return FeatureView(self._statistics.features[index]) def get_cross_feature( self, x_path: Union[str,", "the License for the specific language governing permissions and # limitations under the", "of statistics for multiple datasets (slices).\"\"\" def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatisticsList): self._statistics = stats_proto", "statistics for a dataset (slice).\"\"\" def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatistics): self._feature_map = {} #", "as a DatasetListView. 
Args: input_path_prefix: If passed, loads files starting with this prefix", "return self._slice_map.get(constants.DEFAULT_SLICE_KEY, None) def list_slices(self) -> Iterable[str]: self._init_index() return self._slice_map.keys() class DatasetView(object): \"\"\"View", "self._statistics def get_feature( self, feature_id: Union[str, types.FeaturePath, Iterable[str]] ) -> Optional['FeatureView']: \"\"\"Retrieve a", "this prefix and ending with a pattern corresponding to the output of the", "maybe_get_utf8(value: bytes) -> Optional[Text]: \"\"\"Returns the value decoded as utf-8, or None if", "DatasetFeatureStatistics proto containing the custom statistics for each feature in the dataset. \"\"\"", "def write_stats_text(stats: statistics_pb2.DatasetFeatureStatisticsList, output_path: Text) -> None: \"\"\"Writes a DatasetFeatureStatisticsList proto to a", "If the input statistics proto does not have the specified slice statistics. \"\"\"", "tf.io.gfile.exists(input_path): raise IOError('Invalid input path {}.'.format(input_path)) try: return load_stats_tfrecord(input_path) except Exception: # pylint:", "== 1: return results.pop() return None class FeatureView(object): \"\"\"View of a single feature.", "if available.\"\"\" which = self._statistics.WhichOneof('stats') if which == 'num_stats': return self._statistics.num_stats.common_stats if which", "is not of the expected type. ValueError: If the input feature is not", "single record.') except StopIteration: return result except Exception as e: raise e def", "null, but feature {} ' 'was {}.'.format(feature_path, arrow_type)) value_type = arrow_util.get_innermost_nested_type(arrow_type) if pa.types.is_integer(value_type):", "schema inference using a # CustomStatistic with name=DOMAIN_INFO. 
DOMAIN_INFO = 'domain_info' # LINT.ThenChange(../anomalies/custom_domain_util.cc)", "return _NP_DTYPE_KIND_TO_FEATURE_TYPE.get(dtype.kind) def get_feature_type_from_arrow_type( feature_path: types.FeaturePath, arrow_type: pa.DataType) -> Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature type", "of a deriver. Matches validation_derived_source deriver_name. source_paths: Source paths for derived features. Matches", "stats_proto: statistics_pb2.DatasetFeatureStatistics): self._feature_map = {} # type: Dict[types.FeaturePath, int] self._cross_feature_map = { }", "enumerate(self._statistics.features): if field_identifier is None: field_identifier = feature.WhichOneof('field_id') elif feature.WhichOneof('field_id') != field_identifier: raise", "'bytes_stats': return self._statistics.bytes_stats return None def struct_statistics(self) -> Optional[statistics_pb2.StructStatistics]: \"\"\"Retrieve struct statistics if", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "proto for one feature. Args: stats_values: A Dict[str,float] where the key of the", "tensorflow_data_validation.arrow import arrow_util from tensorflow_data_validation.utils import statistics_io_impl from tensorflow_data_validation.utils import io_util from tfx_bsl", "-> Iterable[str]: self._init_index() return self._slice_map.keys() class DatasetView(object): \"\"\"View of statistics for a dataset", "in stats.datasets: if slice_stats.name == slice_key: result = statistics_pb2.DatasetFeatureStatisticsList() result.datasets.add().CopyFrom(slice_stats) return result raise", "if all_match: results.append(FeatureView(feature)) if len(results) > 1: raise ValueError('Ambiguous result, %d features matched'", "feature. Ex. 
{ 'Mutual Information': 0.5, 'Correlation': 0.1 } feature_path: The path of", "tfx_bsl import statistics from google.protobuf import text_format from tensorflow_metadata.proto.v0 import statistics_pb2 _NP_DTYPE_KIND_TO_FEATURE_TYPE =", "__init__(self, stats_proto: statistics_pb2.CrossFeatureStatistics): self._statistics = stats_proto def proto(self) -> statistics_pb2.CrossFeatureStatistics: \"\"\"Retrieve the underlying", "dtype: Numpy dtype. Returns: A statistics_pb2.FeatureNameStatistics.Type value. \"\"\" return _NP_DTYPE_KIND_TO_FEATURE_TYPE.get(dtype.kind) def get_feature_type_from_arrow_type( feature_path:", "file. Args: input_path: File path from which to load the DatasetFeatureStatisticsList proto. Returns:", "return field_identifier = None for j, feature in enumerate(self._statistics.features): if field_identifier is None:", "self._init_index() return self._slice_map.keys() class DatasetView(object): \"\"\"View of statistics for a dataset (slice).\"\"\" def", "feature.WhichOneof('field_id') != field_identifier: raise ValueError( 'Features must be specified with either path or", "not present. \"\"\" feature_id = _normalize_feature_id(feature_id) self._init_index() index = self._feature_map.get(feature_id, None) if index", "look like a TFRecord. Try reading as a plain ' 'file.', input_path) return", "methods, as well as access to the underlying proto. Where possible, accessors should", "def get_derived_feature( self, deriver_name: str, source_paths: Sequence[types.FeaturePath]) -> Optional['FeatureView']: \"\"\"Retrieve a derived feature", "and can be referred to as such. 
Args: feature_id: A types.FeaturePath, Iterable[str] consisting", "self._statistics def load_sharded_statistics( input_path_prefix: Optional[str] = None, input_paths: Optional[Iterable[str]] = None, io_provider: Optional[statistics_io_impl.StatisticsIOProvider]", "def load_stats_text( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads the specified DatasetFeatureStatisticsList proto stored in", "a specific slice. Args: stats: A DatasetFeatureStatisticsList protocol buffer. slice_key: Slice key of", "x_path = _normalize_feature_id(x_path) y_path = _normalize_feature_id(y_path) self._init_index() feature_id = (x_path, y_path) index =", "key.') def load_statistics( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data statistics proto from file.", "Sort alphabetically by statistic name to have deterministic ordering stat_names = sorted(stats_values.keys()) for", "a feature. \"\"\" result = statistics_pb2.FeatureNameStatistics() result.path.CopyFrom(feature_path.to_proto()) # Sort alphabetically by statistic name", "Try reading as a plain ' 'file.', input_path) return load_stats_text(input_path) def _normalize_feature_id( name_or_path_or_steps:", "files starting with this prefix and ending with a pattern corresponding to the", "types.FeaturePath]]: \"\"\"Lists cross-feature identifiers.\"\"\" self._init_index() return self._cross_feature_map.keys() def get_derived_feature( self, deriver_name: str, source_paths:", "dataset in self._statistics.datasets: if dataset.name in self._slice_map: raise ValueError('Duplicate slice name %s' %", "= statistics_pb2.DatasetFeatureStatisticsList() result.datasets.add().CopyFrom(slice_stats) return result raise ValueError('Invalid slice key.') def load_statistics( input_path: Text)", "DatasetFeatureStatistics proto with custom stats from input dict. Args: stats_values: A Dict[FeaturePath, Dict[str,float]]", "statistics is not of the expected type. 
ValueError: If the input feature is", "-> Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature type from numpy dtype. Args: dtype: Numpy dtype. Returns:", "Version 2.0 (the \"License\"); # you may not use this file except in", "not isinstance(feature_stats, statistics_pb2.FeatureNameStatistics): raise TypeError('feature_stats is of type %s, should be a '", "\"\"\"Read a sharded DatasetFeatureStatisticsList from disk as a DatasetListView. Args: input_path_prefix: If passed,", "A FeatureNameStatistics protocol buffer. Raises: TypeError: If the input statistics is not of", "'FeatureNameStatistics proto.' % type(feature_stats).__name__) for custom_stats in feature_stats.custom_stats: if custom_stats.name == custom_stats_name: return", "from TFRecord file. Args: input_path: Data statistics file path. Returns: A DatasetFeatureStatisticsList proto.", "for a dataset (slice).\"\"\" def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatistics): self._feature_map = {} # type:", "from __future__ import print_function import logging from typing import Dict, Iterable, Optional, Sequence,", "is None: return None return FeatureView(self._statistics.features[index]) def get_cross_feature( self, x_path: Union[str, types.FeaturePath, Iterable[str]],", "!= len( feature.validation_derived_source.source_path)): continue all_match = True for i in range(len(source_paths)): if (source_paths[i]", "which == 'bytes_stats': return self._statistics.bytes_stats.common_stats if which == 'struct_stats': return self._statistics.struct_stats.common_stats return None", "well as access to the underlying proto. Where possible, accessors should be used", "statistics_pb2.FeatureNameStatistics.Type value. \"\"\" return _NP_DTYPE_KIND_TO_FEATURE_TYPE.get(dtype.kind) def get_feature_type_from_arrow_type( feature_path: types.FeaturePath, arrow_type: pa.DataType) -> Optional[types.FeatureNameStatisticsType]:", "DataType. 
Returns: A statistics_pb2.FeatureNameStatistics.Type value or None if arrow_type is null (which means", "= types.FeaturePath([feature.name]) else: feature_id = types.FeaturePath.from_proto(feature.path) if feature_id in self._feature_map: raise ValueError('Duplicate feature", "of proto access (for example, x.numeric_statistics() instead of x.proto().num_stats) in order to support", "initialize in case we don't need an index. if self._initialized: return for dataset", "self._statistics = stats_proto self._slice_map = {} # type: Dict[str, DatasetView] self._initialized = False", "statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'struct_stats': return self._statistics.struct_stats return None def common_statistics(self)", "\"\"\"Get feature type from numpy dtype. Args: dtype: Numpy dtype. Returns: A statistics_pb2.FeatureNameStatistics.Type", "and the value is the numeric value of the custom statistic of that", "of that feature. Ex. { 'Mutual Information': 0.5, 'Correlation': 0.1 } feature_path: The", "'name': feature_id = types.FeaturePath([feature.name]) else: feature_id = types.FeaturePath.from_proto(feature.path) if feature_id in self._feature_map: raise", "Raises: TypeError: if the type is not supported. \"\"\" if pa.types.is_null(arrow_type): return None", "% feature_path) def get_custom_stats( feature_stats: statistics_pb2.FeatureNameStatistics, custom_stats_name: Text ) -> Union[float, Text, statistics_pb2.Histogram,", "Source paths for derived features. Matches validation_derived_source.source_path. Returns: FeatureView of derived feature. Raises:", "decoded as utf-8, or None if it cannot be decoded. Args: value: The", "None class FeatureView(object): \"\"\"View of a single feature. This class provides accessor methods,", "Args: input_path: Data statistics file path. 
The file should be a one-record TFRecord", "a cross-feature if it exists, or None.\"\"\" x_path = _normalize_feature_id(x_path) y_path = _normalize_feature_id(y_path)", "\"\"\" return _NP_DTYPE_KIND_TO_FEATURE_TYPE.get(dtype.kind) def get_feature_type_from_arrow_type( feature_path: types.FeaturePath, arrow_type: pa.DataType) -> Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature", "feature. Raises: ValueError if multiple derived features match. \"\"\" # TODO(b/221453427): Consider indexing", "single cross feature.\"\"\" def __init__(self, stats_proto: statistics_pb2.CrossFeatureStatistics): self._statistics = stats_proto def proto(self) ->", "the dataset. \"\"\" result = statistics_pb2.DatasetFeatureStatistics() # Sort alphabetically by feature name to", "input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads the specified DatasetFeatureStatisticsList proto stored in text format.", "False break if all_match: results.append(FeatureView(feature)) if len(results) > 1: raise ValueError('Ambiguous result, %d", "of the feature whose statistics to obtain from the dataset statistics. Returns: A", "cannot be decoded. Args: value: The bytes value to decode. Returns: The value", "be a ' 'DatasetFeatureStatistics proto.' % type(stats).__name__) for feature_stats in stats.features: if feature_path", "return name_or_path_or_steps return types.FeaturePath(name_or_path_or_steps) class DatasetListView(object): \"\"\"View of statistics for multiple datasets (slices).\"\"\"", "0.1 } feature_path: The path of the feature. Returns: A FeatureNameStatistic proto containing", "\"\"\"Loads data statistics proto from file. Args: input_path: Data statistics file path. 
The", "if self._initialized: return for dataset in self._statistics.datasets: if dataset.name in self._slice_map: raise ValueError('Duplicate", "struct_statistics(self) -> Optional[statistics_pb2.StructStatistics]: \"\"\"Retrieve struct statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'struct_stats': return", "statistics_pb2.CrossFeatureStatistics): self._statistics = stats_proto def proto(self) -> statistics_pb2.CrossFeatureStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return", "a file. Args: input_path: File path from which to load the DatasetFeatureStatisticsList proto.", "accessor methods, as well as access to the underlying proto. Where possible, accessors", "statistics_pb2.DatasetFeatureStatisticsList): self._statistics = stats_proto self._slice_map = {} # type: Dict[str, DatasetView] self._initialized =", "google.protobuf import text_format from tensorflow_metadata.proto.v0 import statistics_pb2 _NP_DTYPE_KIND_TO_FEATURE_TYPE = { 'f': statistics_pb2.FeatureNameStatistics.FLOAT, 'i':", "deriver name and its inputs. Args: deriver_name: The name of a deriver. Matches", "str) -> Optional['DatasetView']: self._init_index() return self._slice_map.get(slice_key, None) def get_default_slice(self) -> Optional['DatasetView']: self._init_index() if", "if available.\"\"\" if self._statistics.WhichOneof('stats') == 'bytes_stats': return self._statistics.bytes_stats return None def struct_statistics(self) ->", "if isinstance(name_or_path_or_steps, str): return types.FeaturePath([name_or_path_or_steps]) if isinstance(name_or_path_or_steps, types.FeaturePath): return name_or_path_or_steps return types.FeaturePath(name_or_path_or_steps) class", "result # TODO(b/202910677): Add convenience methods for retrieving first-party custom # statistics (e.g.,", "a single feature. 
This class provides accessor methods, as well as access to", "alphabetically by statistic name to have deterministic ordering stat_names = sorted(stats_values.keys()) for stat_name", "if len(results) > 1: raise ValueError('Ambiguous result, %d features matched' % len(results)) if", "types.FeaturePath, Iterable[str]] ) -> Optional['FeatureView']: \"\"\"Retrieve a feature if it exists. Features specified", "} } Returns: DatasetFeatureStatistics proto containing the custom statistics for each feature in", "type(stats).__name__) for feature_stats in stats.features: if feature_path == types.FeaturePath.from_proto(feature_stats.path): return feature_stats raise ValueError('Feature", "'was {}.'.format(feature_path, arrow_type)) value_type = arrow_util.get_innermost_nested_type(arrow_type) if pa.types.is_integer(value_type): return statistics_pb2.FeatureNameStatistics.INT elif pa.types.is_floating(value_type): return", "from input dict. Args: stats_values: A Dict[FeaturePath, Dict[str,float]] where the keys are feature", "feature_id: Union[str, types.FeaturePath, Iterable[str]] ) -> Optional['FeatureView']: \"\"\"Retrieve a feature if it exists.", "inputs. Args: deriver_name: The name of a deriver. Matches validation_derived_source deriver_name. source_paths: Source", "from the dataset statistics. Returns: A FeatureNameStatistics protocol buffer. Raises: TypeError: If the", "statistics if available.\"\"\" which = self._statistics.WhichOneof('stats') if which == 'num_stats': return self._statistics.num_stats.common_stats if", "value decoded as utf-8, or None, if the value cannot be decoded. \"\"\"", "the input path does not exist. \"\"\" if not tf.io.gfile.exists(input_path): raise IOError('Invalid input", "\"\"\"Retrieve string statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'string_stats': return self._statistics.string_stats return None", "dict. 
Args: stats_values: A Dict[FeaturePath, Dict[str,float]] where the keys are feature paths, and", "int] self._cross_feature_map = { } # type: Dict[Tuple[types.FeaturePath, types.FeaturePath], int] self._statistics = stats_proto", "of the custom statistic for the feature. Ex. { FeaturePath(('feature_1',)): { 'Mutual Information':", "Iterable[str]: self._init_index() return self._slice_map.keys() class DatasetView(object): \"\"\"View of statistics for a dataset (slice).\"\"\"", "name: str) -> Optional[statistics_pb2.CustomStatistic]: \"\"\"Retrieve a custom_statistic by name.\"\"\" result = None for", "for the feature. Ex. { FeaturePath(('feature_1',)): { 'Mutual Information': 0.5, 'Correlation': 0.1 },", "custom_stats_name: return getattr(custom_stats, custom_stats.WhichOneof('val')) raise ValueError('Custom statistics %s not found in the feature", "pa.types.is_floating(value_type): return statistics_pb2.FeatureNameStatistics.FLOAT elif arrow_util.is_binary_like(value_type): return statistics_pb2.FeatureNameStatistics.STRING elif pa.types.is_struct(value_type): return statistics_pb2.FeatureNameStatistics.STRUCT elif pa.types.is_null(value_type):", "not tf.io.gfile.exists(input_path): raise IOError('Invalid input path {}.'.format(input_path)) try: return load_stats_tfrecord(input_path) except Exception: #", "% feature_id) self._feature_map[feature_id] = j for j, cross_feature in enumerate(self._statistics.cross_features): feature_id = (types.FeaturePath.from_proto(cross_feature.path_x),", "of the feature. arrow_type: Arrow DataType. Returns: A statistics_pb2.FeatureNameStatistics.Type value or None if", "statistics_pb2.FeatureNameStatistics): raise TypeError('feature_stats is of type %s, should be a ' 'FeatureNameStatistics proto.'", "custom_stats.name == custom_stats_name: return getattr(custom_stats, custom_stats.WhichOneof('val')) raise ValueError('Custom statistics %s not found in", "serialized DatasetFeatureStatisticsList proto from a file. 
Args: input_path: File path from which to", "utf-8, or None, if the value cannot be decoded. \"\"\" try: decoded_value =", "{ 'Mutual Information': 0.5, 'Correlation': 0.1 } feature_path: The path of the feature.", "None: \"\"\"Writes a DatasetFeatureStatisticsList proto to a file in text format. Args: stats:", "the output of the provided io_provider. input_paths: A list of file paths of", "= next(it) try: next(it) raise ValueError('load_stats_tfrecord expects a single record.') except StopIteration: return", "Raises: IOError: If the input path does not exist. \"\"\" if not tf.io.gfile.exists(input_path):", "steps, or a str, which is converted to a length one path. Returns:", "text_format.MessageToString(stats) io_util.write_string_to_file(output_path, stats_proto_text) def load_stats_text( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads the specified DatasetFeatureStatisticsList", "return statistics_pb2.FeatureNameStatistics.STRING elif pa.types.is_struct(value_type): return statistics_pb2.FeatureNameStatistics.STRUCT elif pa.types.is_null(value_type): return None raise TypeError('Feature {}", "Args: feature_path: path of the feature. arrow_type: Arrow DataType. Returns: A statistics_pb2.FeatureNameStatistics.Type value", "types.FeaturePath ) -> statistics_pb2.FeatureNameStatistics: \"\"\"Get feature statistics from the dataset statistics. 
Args: stats:", "return for dataset in self._statistics.datasets: if dataset.name in self._slice_map: raise ValueError('Duplicate slice name", "for i in range(len(source_paths)): if (source_paths[i] != types.FeaturePath.from_proto( feature.validation_derived_source.source_path[i])): all_match = False break", "feature_id = (types.FeaturePath.from_proto(cross_feature.path_x), types.FeaturePath.from_proto(cross_feature.path_y)) if feature_id in self._cross_feature_map: raise ValueError('Duplicate feature %s' %", "key of the dict is the name of the custom statistic and the", "feature_path, arrow_type)) def make_dataset_feature_stats_proto( stats_values: Dict[types.FeaturePath, Dict[Text, float]] ) -> statistics_pb2.DatasetFeatureStatistics: \"\"\"Builds DatasetFeatureStatistics", "proto with custom stats from input dict. Args: stats_values: A Dict[FeaturePath, Dict[str,float]] where", "next(it) raise ValueError('load_stats_tfrecord expects a single record.') except StopIteration: return result except Exception", "{} has unsupported arrow type: {}'.format( feature_path, arrow_type)) def make_dataset_feature_stats_proto( stats_values: Dict[types.FeaturePath, Dict[Text,", "of a single cross feature.\"\"\" def __init__(self, stats_proto: statistics_pb2.CrossFeatureStatistics): self._statistics = stats_proto def", "with a specific slice. Args: stats: A DatasetFeatureStatisticsList protocol buffer. slice_key: Slice key", "ordering stat_names = sorted(stats_values.keys()) for stat_name in stat_names: result.custom_stats.add(name=stat_name, num=stats_values[stat_name]) return result def", "for dataset in self._statistics.datasets: if dataset.name in self._slice_map: raise ValueError('Duplicate slice name %s'", "= False break if all_match: results.append(FeatureView(feature)) if len(results) > 1: raise ValueError('Ambiguous result,", "like a TFRecord. Try reading as a plain ' 'file.', input_path) return load_stats_text(input_path)", "OF ANY KIND, either express or implied. 
# See the License for the", "proto.\"\"\" return self._statistics def get_slice(self, slice_key: str) -> Optional['DatasetView']: self._init_index() return self._slice_map.get(slice_key, None)", "raise TypeError('statistics is of type %s, should be a ' 'DatasetFeatureStatistics proto.' %", "load_sharded_statistics( input_path_prefix: Optional[str] = None, input_paths: Optional[Iterable[str]] = None, io_provider: Optional[statistics_io_impl.StatisticsIOProvider] = None", "stats_proto def load_stats_tfrecord( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data statistics proto from TFRecord", "feature_id is not present. \"\"\" feature_id = _normalize_feature_id(feature_id) self._init_index() index = self._feature_map.get(feature_id, None)", "\"\"\"Returns the value decoded as utf-8, or None if it cannot be decoded.", "return self._statistics def get_slice(self, slice_key: str) -> Optional['DatasetView']: self._init_index() return self._slice_map.get(slice_key, None) def", "disable=broad-except logging.info('File %s did not look like a TFRecord. Try reading as a", "feature_stats.custom_stats: if custom_stats.name == custom_stats_name: return getattr(custom_stats, custom_stats.WhichOneof('val')) raise ValueError('Custom statistics %s not", "the key of the dict is the name of the custom statistic and", "feature type from numpy dtype. Args: dtype: Numpy dtype. Returns: A statistics_pb2.FeatureNameStatistics.Type value.", "specified DatasetFeatureStatisticsList proto stored in text format. Args: input_path: File path from which", "text format. Args: stats: A DatasetFeatureStatisticsList proto. 
output_path: File path to write the", "= statistics_io_impl.get_io_provider('tfrecords').record_iterator_impl( [input_path]) result = next(it) try: next(it) raise ValueError('load_stats_tfrecord expects a single", "Optional[Iterable[str]] = None, io_provider: Optional[statistics_io_impl.StatisticsIOProvider] = None ) -> DatasetListView: \"\"\"Read a sharded", "return self._statistics.bytes_stats.common_stats if which == 'struct_stats': return self._statistics.struct_stats.common_stats return None class CrossFeatureView(object): \"\"\"View", "or None if it cannot be decoded. Args: value: The bytes value to", "input_path: Data statistics file path. Returns: A DatasetFeatureStatisticsList proto. \"\"\" it = statistics_io_impl.get_io_provider('tfrecords').record_iterator_impl(", "getattr(custom_stats, custom_stats.WhichOneof('val')) raise ValueError('Custom statistics %s not found in the feature statistics.' %", "all_match = True for i in range(len(source_paths)): if (source_paths[i] != types.FeaturePath.from_proto( feature.validation_derived_source.source_path[i])): all_match", "the custom statistic for the feature. Ex. { FeaturePath(('feature_1',)): { 'Mutual Information': 0.5,", "stats generators.\"\"\" from __future__ import absolute_import from __future__ import division from __future__ import", "None == input_paths is None: raise ValueError('Must provide one of input_paths_prefix, input_paths.') if", "statistics.' % custom_stats_name) def get_slice_stats( stats: statistics_pb2.DatasetFeatureStatisticsList, slice_key: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Get statistics", "statistics_pb2.FeatureNameStatistics, custom_stats_name: Text ) -> Union[float, Text, statistics_pb2.Histogram, statistics_pb2.RankHistogram]: \"\"\"Get custom statistics from", "be determined for now). Raises: TypeError: if the type is not supported. \"\"\"", "value cannot be decoded. 
\"\"\" try: decoded_value = value.decode('utf-8') except UnicodeError: return None", "== 'struct_stats': return self._statistics.struct_stats.common_stats return None class CrossFeatureView(object): \"\"\"View of a single cross", "starting with this prefix and ending with a pattern corresponding to the output", "return self._statistics.struct_stats return None def common_statistics(self) -> Optional[statistics_pb2.CommonStatistics]: \"\"\"Retrieve common statistics if available.\"\"\"", "pa.DataType) -> Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature type from Arrow type. Args: feature_path: path of", "slice_stats in stats.datasets: if slice_stats.name == slice_key: result = statistics_pb2.DatasetFeatureStatisticsList() result.datasets.add().CopyFrom(slice_stats) return result", "default will be constructed. Returns: A DatasetListView containing the merged proto. \"\"\" if", "def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatistics): self._feature_map = {} # type: Dict[types.FeaturePath, int] self._cross_feature_map =", "Returns: Statistics of the specific slice. Raises: ValueError: If the input statistics proto", "TODO(b/221453427): Consider indexing if performance becomes an issue. 
results = [] for feature", "cross feature.\"\"\" def __init__(self, stats_proto: statistics_pb2.CrossFeatureStatistics): self._statistics = stats_proto def proto(self) -> statistics_pb2.CrossFeatureStatistics:", "self._statistics.WhichOneof('stats') == 'struct_stats': return self._statistics.struct_stats return None def common_statistics(self) -> Optional[statistics_pb2.CommonStatistics]: \"\"\"Retrieve common", "\"\"\" if not isinstance(stats, statistics_pb2.DatasetFeatureStatistics): raise TypeError('statistics is of type %s, should be", "underlying proto.\"\"\" return self._statistics def get_slice(self, slice_key: str) -> Optional['DatasetView']: self._init_index() return self._slice_map.get(slice_key,", "containing sharded DatasetFeatureStatisticsList protos. io_provider: Optional StatisticsIOProvider. If unset, a default will be", "self._statistics.custom_stats: if stat.name == name: if result is None: result = stat else:", "result def write_stats_text(stats: statistics_pb2.DatasetFeatureStatisticsList, output_path: Text) -> None: \"\"\"Writes a DatasetFeatureStatisticsList proto to", "containing the statistics proto in Proto Text Format. Returns: A DatasetFeatureStatisticsList proto. Raises:", "bytes) -> Optional[Text]: \"\"\"Returns the value decoded as utf-8, or None if it", "can be passed to schema inference using a # CustomStatistic with name=DOMAIN_INFO. DOMAIN_INFO", "of the expected type. ValueError: If the custom statistic is not found in", "extension of the proto. 
\"\"\" def __init__(self, stats_proto: statistics_pb2.FeatureNameStatistics): self._statistics = stats_proto def", "which == 'struct_stats': return self._statistics.struct_stats.common_stats return None class CrossFeatureView(object): \"\"\"View of a single", "Args: input_path_prefix: If passed, loads files starting with this prefix and ending with", "name_or_path_or_steps: Union[str, types.FeaturePath, Iterable[str]] ) -> types.FeaturePath: if isinstance(name_or_path_or_steps, str): return types.FeaturePath([name_or_path_or_steps]) if", "all_match: results.append(FeatureView(feature)) if len(results) > 1: raise ValueError('Ambiguous result, %d features matched' %", "statistics from google.protobuf import text_format from tensorflow_metadata.proto.v0 import statistics_pb2 _NP_DTYPE_KIND_TO_FEATURE_TYPE = { 'f':", "' 'DatasetFeatureStatistics proto.' % type(stats).__name__) for feature_stats in stats.features: if feature_path == types.FeaturePath.from_proto(feature_stats.path):", "get_slice(self, slice_key: str) -> Optional['DatasetView']: self._init_index() return self._slice_map.get(slice_key, None) def get_default_slice(self) -> Optional['DatasetView']:", "feature %s' % feature_id) self._cross_feature_map[feature_id] = j self._initialized = True def proto(self) ->", "new_feature_stats_proto = result.features.add() new_feature_stats_proto.CopyFrom(feature_stats_proto) return result def _make_feature_stats_proto( stats_values: Dict[Text, float], feature_path: types.FeaturePath)", "or agreed to in writing, software # distributed under the License is distributed", "from tensorflow_metadata.proto.v0 import statistics_pb2 _NP_DTYPE_KIND_TO_FEATURE_TYPE = { 'f': statistics_pb2.FeatureNameStatistics.FLOAT, 'i': statistics_pb2.FeatureNameStatistics.INT, 'u': statistics_pb2.FeatureNameStatistics.INT,", "import statistics_pb2 _NP_DTYPE_KIND_TO_FEATURE_TYPE = { 'f': statistics_pb2.FeatureNameStatistics.FLOAT, 'i': statistics_pb2.FeatureNameStatistics.INT, 'u': 
statistics_pb2.FeatureNameStatistics.INT, 'S': statistics_pb2.FeatureNameStatistics.STRING,", "the custom statistics for a feature. \"\"\" result = statistics_pb2.FeatureNameStatistics() result.path.CopyFrom(feature_path.to_proto()) # Sort", "Args: feature_stats: A FeatureNameStatistics protocol buffer. custom_stats_name: The name of the custom statistics", "(x_path, y_path) index = self._cross_feature_map.get(feature_id, None) if index is None: return None return", "# CustomStatistic with name=DOMAIN_INFO. DOMAIN_INFO = 'domain_info' # LINT.ThenChange(../anomalies/custom_domain_util.cc) def maybe_get_utf8(value: bytes) ->", "load the DatasetFeatureStatisticsList proto. Returns: A DatasetFeatureStatisticsList proto. \"\"\" stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_text", "None) if index is None: return None return CrossFeatureView(self._statistics.cross_features[index]) def list_features(self) -> Iterable[types.FeaturePath]:", "feature is not found in the dataset statistics. \"\"\" if not isinstance(stats, statistics_pb2.DatasetFeatureStatistics):", "Returns: A DatasetListView containing the merged proto. \"\"\" if input_path_prefix is None ==", "plain ' 'file.', input_path) return load_stats_text(input_path) def _normalize_feature_id( name_or_path_or_steps: Union[str, types.FeaturePath, Iterable[str]] )", "statistics is not of the expected type. ValueError: If the custom statistic is", "None if feature_id is not present. \"\"\" feature_id = _normalize_feature_id(feature_id) self._init_index() index =", "TFRecord file. Args: input_path: Data statistics file path. Returns: A DatasetFeatureStatisticsList proto. 
\"\"\"", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "stats_values: Dict[Text, float], feature_path: types.FeaturePath) -> statistics_pb2.FeatureNameStatistics: \"\"\"Creates the FeatureNameStatistics proto for one", "The name of the custom statistics to obtain from the feature statistics proto.", "License. # You may obtain a copy of the License at # #", "feature.\"\"\" def __init__(self, stats_proto: statistics_pb2.CrossFeatureStatistics): self._statistics = stats_proto def proto(self) -> statistics_pb2.CrossFeatureStatistics: \"\"\"Retrieve", "0.6 } } Returns: DatasetFeatureStatistics proto containing the custom statistics for each feature", "None if not arrow_util.is_list_like(arrow_type): raise TypeError('Expected feature column to be a ' '(Large)List<primitive|struct>", "format. Args: stats: A DatasetFeatureStatisticsList proto. output_path: File path to write the DatasetFeatureStatisticsList", "feature_path: types.FeaturePath ) -> statistics_pb2.FeatureNameStatistics: \"\"\"Get feature statistics from the dataset statistics. Args:", "if feature_path == types.FeaturePath.from_proto(feature_stats.path): return feature_stats raise ValueError('Feature %s not found in the", "or None if feature_id is not present. \"\"\" feature_id = _normalize_feature_id(feature_id) self._init_index() index", "isinstance(name_or_path_or_steps, str): return types.FeaturePath([name_or_path_or_steps]) if isinstance(name_or_path_or_steps, types.FeaturePath): return name_or_path_or_steps return types.FeaturePath(name_or_path_or_steps) class DatasetListView(object):", "based on a deriver name and its inputs. Args: deriver_name: The name of", "does not have the specified slice statistics. 
\"\"\" for slice_stats in stats.datasets: if", "Information': 0.5, 'Correlation': 0.1 }, FeaturePath(('feature_2',)): { 'Mutual Information': 0.8, 'Correlation': 0.6 }", "x.numeric_statistics() instead of x.proto().num_stats) in order to support future extension of the proto.", "% feature_id) self._cross_feature_map[feature_id] = j self._initialized = True def proto(self) -> statistics_pb2.DatasetFeatureStatistics: \"\"\"Retrieve", "= DatasetView(dataset) self._initialized = True def proto(self) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Retrieve the underlying proto.\"\"\"", "return None def struct_statistics(self) -> Optional[statistics_pb2.StructStatistics]: \"\"\"Retrieve struct statistics if available.\"\"\" if self._statistics.WhichOneof('stats')", "underlying proto.\"\"\" return self._statistics def custom_statistic(self, name: str) -> Optional[statistics_pb2.CustomStatistic]: \"\"\"Retrieve a custom_statistic", "for the specific language governing permissions and # limitations under the License. \"\"\"Utilities", "statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'bytes_stats': return self._statistics.bytes_stats return None def struct_statistics(self)", "the merged proto. \"\"\" if input_path_prefix is None == input_paths is None: raise", "feature. Args: stats_values: A Dict[str,float] where the key of the dict is the", "Data statistics file path. Returns: A DatasetFeatureStatisticsList proto. \"\"\" it = statistics_io_impl.get_io_provider('tfrecords').record_iterator_impl( [input_path])", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "feature paths, and values are Dicts with keys denoting name of the custom", "custom statistic. Raises: TypeError: If the input feature statistics is not of the", "if not isinstance(stats, statistics_pb2.DatasetFeatureStatisticsList): raise TypeError( 'stats is of type %s, should be", "1 path, and can be referred to as such. 
Args: feature_id: A types.FeaturePath,", "A DatasetFeatureStatisticsList proto. Raises: IOError: If the input path does not exist. \"\"\"", "order to support future extension of the proto. \"\"\" def __init__(self, stats_proto: statistics_pb2.FeatureNameStatistics):", "Iterable[str]] ) -> Optional['FeatureView']: \"\"\"Retrieve a feature if it exists. Features specified within", "{ } # type: Dict[Tuple[types.FeaturePath, types.FeaturePath], int] self._statistics = stats_proto self._initialized = False", "Iterable[str]] ) -> types.FeaturePath: if isinstance(name_or_path_or_steps, str): return types.FeaturePath([name_or_path_or_steps]) if isinstance(name_or_path_or_steps, types.FeaturePath): return", "Optional[str] = None, input_paths: Optional[Iterable[str]] = None, io_provider: Optional[statistics_io_impl.StatisticsIOProvider] = None ) ->", "statistics_pb2.FeatureNameStatistics: \"\"\"Creates the FeatureNameStatistics proto for one feature. Args: stats_values: A Dict[str,float] where", "\"\"\"Initializes internal indices. Noop if already initialized.\"\"\" if self._initialized: return field_identifier = None", "dataset.name in self._slice_map: raise ValueError('Duplicate slice name %s' % dataset.name) self._slice_map[dataset.name] = DatasetView(dataset)", "= j self._initialized = True def proto(self) -> statistics_pb2.DatasetFeatureStatistics: \"\"\"Retrieve the underlying proto.\"\"\"", "License, Version 2.0 (the \"License\"); # you may not use this file except", "numpy as np import pyarrow as pa import tensorflow as tf from tensorflow_data_validation", "'O': statistics_pb2.FeatureNameStatistics.STRING, 'U': statistics_pb2.FeatureNameStatistics.STRING, } # LINT.IfChange # Semantic domain information can be", "proto. output_path: File path to write the DatasetFeatureStatisticsList proto. 
Raises: TypeError: If the", "in enumerate(self._statistics.features): if field_identifier is None: field_identifier = feature.WhichOneof('field_id') elif feature.WhichOneof('field_id') != field_identifier:", "return statistics_pb2.FeatureNameStatistics.FLOAT elif arrow_util.is_binary_like(value_type): return statistics_pb2.FeatureNameStatistics.STRING elif pa.types.is_struct(value_type): return statistics_pb2.FeatureNameStatistics.STRUCT elif pa.types.is_null(value_type): return", "prefix and ending with a pattern corresponding to the output of the provided", "TypeError('Feature {} has unsupported arrow type: {}'.format( feature_path, arrow_type)) def make_dataset_feature_stats_proto( stats_values: Dict[types.FeaturePath,", "def get_slice(self, slice_key: str) -> Optional['DatasetView']: self._init_index() return self._slice_map.get(slice_key, None) def get_default_slice(self) ->", "\"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def load_sharded_statistics( input_path_prefix: Optional[str] = None, input_paths:", "for custom_stats in feature_stats.custom_stats: if custom_stats.name == custom_stats_name: return getattr(custom_stats, custom_stats.WhichOneof('val')) raise ValueError('Custom", "if isinstance(name_or_path_or_steps, types.FeaturePath): return name_or_path_or_steps return types.FeaturePath(name_or_path_or_steps) class DatasetListView(object): \"\"\"View of statistics for", "Text Format. Returns: A DatasetFeatureStatisticsList proto. Raises: IOError: If the input path does", "proto to a file in text format. Args: stats: A DatasetFeatureStatisticsList proto. output_path:", "stats_proto def proto(self) -> statistics_pb2.FeatureNameStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def custom_statistic(self,", "as np import pyarrow as pa import tensorflow as tf from tensorflow_data_validation import", "the value decoded as utf-8, or None if it cannot be decoded. 
Args:", "def proto(self) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def get_slice(self, slice_key:", "statistics_pb2.FeatureNameStatistics.FLOAT, 'i': statistics_pb2.FeatureNameStatistics.INT, 'u': statistics_pb2.FeatureNameStatistics.INT, 'S': statistics_pb2.FeatureNameStatistics.STRING, 'O': statistics_pb2.FeatureNameStatistics.STRING, 'U': statistics_pb2.FeatureNameStatistics.STRING, } #", "statistics_io_impl.get_io_provider('tfrecords').record_iterator_impl( [input_path]) result = next(it) try: next(it) raise ValueError('load_stats_tfrecord expects a single record.')", "%s' % feature_id) self._feature_map[feature_id] = j for j, cross_feature in enumerate(self._statistics.cross_features): feature_id =", "slice. Returns: Statistics of the specific slice. Raises: ValueError: If the input statistics", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "stats_values: A Dict[FeaturePath, Dict[str,float]] where the keys are feature paths, and values are", "A FeatureNameStatistic proto containing the custom statistics for a feature. \"\"\" result =", "or None if arrow_type is null (which means it cannot be determined for", "DatasetFeatureStatisticsList proto to a file in text format. Args: stats: A DatasetFeatureStatisticsList proto.", "return result def _make_feature_stats_proto( stats_values: Dict[Text, float], feature_path: types.FeaturePath) -> statistics_pb2.FeatureNameStatistics: \"\"\"Creates the", "DatasetListView containing the merged proto. \"\"\" if input_path_prefix is None == input_paths is", "StatisticsIOProvider. If unset, a default will be constructed. Returns: A DatasetListView containing the", "load the DatasetFeatureStatisticsList proto. Returns: A DatasetFeatureStatisticsList proto. 
\"\"\" stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_proto.ParseFromString(io_util.read_file_to_string(", "Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads a serialized DatasetFeatureStatisticsList proto from a file. Args: input_path:", "-> Optional['DatasetView']: self._init_index() return self._slice_map.get(slice_key, None) def get_default_slice(self) -> Optional['DatasetView']: self._init_index() if len(self._slice_map)", ") -> Union[float, Text, statistics_pb2.Histogram, statistics_pb2.RankHistogram]: \"\"\"Get custom statistics from the feature statistics.", "Optional['DatasetView']: self._init_index() if len(self._slice_map) == 1: for _, v in self._slice_map.items(): return v", "= {} # type: Dict[types.FeaturePath, int] self._cross_feature_map = { } # type: Dict[Tuple[types.FeaturePath,", "If the input feature statistics is not of the expected type. ValueError: If", "values denoting the value of the custom statistic for the feature. Ex. {", "def _make_feature_stats_proto( stats_values: Dict[Text, float], feature_path: types.FeaturePath) -> statistics_pb2.FeatureNameStatistics: \"\"\"Creates the FeatureNameStatistics proto", "arrow_util.is_binary_like(value_type): return statistics_pb2.FeatureNameStatistics.STRING elif pa.types.is_struct(value_type): return statistics_pb2.FeatureNameStatistics.STRUCT elif pa.types.is_null(value_type): return None raise TypeError('Feature", "If unset, a default will be constructed. Returns: A DatasetListView containing the merged", "custom stats from input dict. Args: stats_values: A Dict[FeaturePath, Dict[str,float]] where the keys", "-> Optional['CrossFeatureView']: \"\"\"Retrieve a cross-feature if it exists, or None.\"\"\" x_path = _normalize_feature_id(x_path)", "Raises: TypeError: If the input proto is not of the expected type. \"\"\"", "Returns: A DatasetFeatureStatisticsList proto. 
Raises: IOError: If the input path does not exist.", "statistics_pb2.FeatureNameStatistics() result.path.CopyFrom(feature_path.to_proto()) # Sort alphabetically by statistic name to have deterministic ordering stat_names", "int] self._statistics = stats_proto self._initialized = False def _init_index(self): \"\"\"Initializes internal indices. Noop", "types.FeaturePath.from_proto(feature.path) if feature_id in self._feature_map: raise ValueError('Duplicate feature %s' % feature_id) self._feature_map[feature_id] =", "def list_slices(self) -> Iterable[str]: self._init_index() return self._slice_map.keys() class DatasetView(object): \"\"\"View of statistics for", "be constructed. Returns: A DatasetListView containing the merged proto. \"\"\" if input_path_prefix is", "typing import Dict, Iterable, Optional, Sequence, Text, Tuple, Union import numpy as np", "import constants from tensorflow_data_validation import types from tensorflow_data_validation.arrow import arrow_util from tensorflow_data_validation.utils import", "TypeError: if the type is not supported. \"\"\" if pa.types.is_null(arrow_type): return None if", "file path. Returns: A DatasetFeatureStatisticsList proto. \"\"\" it = statistics_io_impl.get_io_provider('tfrecords').record_iterator_impl( [input_path]) result =", "types.FeaturePath, Iterable[str]] ) -> types.FeaturePath: if isinstance(name_or_path_or_steps, str): return types.FeaturePath([name_or_path_or_steps]) if isinstance(name_or_path_or_steps, types.FeaturePath):", "results = [] for feature in self.proto().features: if feature.validation_derived_source is None: continue if", "load_stats_tfrecord( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data statistics proto from TFRecord file. Args:", "access to the underlying proto. 
Where possible, accessors should be used in place", "cross-feature identifiers.\"\"\" self._init_index() return self._cross_feature_map.keys() def get_derived_feature( self, deriver_name: str, source_paths: Sequence[types.FeaturePath]) ->", "feature in the dataset. \"\"\" result = statistics_pb2.DatasetFeatureStatistics() # Sort alphabetically by feature", "or implied. # See the License for the specific language governing permissions and", "return load_stats_tfrecord(input_path) except Exception: # pylint: disable=broad-except logging.info('File %s did not look like", "Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data statistics proto from TFRecord file. Args: input_path: Data", "dataset (slice).\"\"\" def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatistics): self._feature_map = {} # type: Dict[types.FeaturePath, int]", "slice key.') def load_statistics( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data statistics proto from", "FeaturePath(('feature_1',)): { 'Mutual Information': 0.5, 'Correlation': 0.1 }, FeaturePath(('feature_2',)): { 'Mutual Information': 0.8,", "self._statistics = stats_proto self._initialized = False def _init_index(self): \"\"\"Initializes internal indices. Noop if", "a length 1 path, and can be referred to as such. Args: feature_id:", "the underlying proto.\"\"\" return self._statistics def get_feature( self, feature_id: Union[str, types.FeaturePath, Iterable[str]] )", "result = statistics_pb2.DatasetFeatureStatisticsList() result.datasets.add().CopyFrom(slice_stats) return result raise ValueError('Invalid slice key.') def load_statistics( input_path:", "exists, or None.\"\"\" x_path = _normalize_feature_id(x_path) y_path = _normalize_feature_id(y_path) self._init_index() feature_id = (x_path,", "information can be passed to schema inference using a # CustomStatistic with name=DOMAIN_INFO.", "type. Args: feature_path: path of the feature. arrow_type: Arrow DataType. 
Returns: A statistics_pb2.FeatureNameStatistics.Type", "initialized.\"\"\" if self._initialized: return field_identifier = None for j, feature in enumerate(self._statistics.features): if", "buffer. Raises: TypeError: If the input statistics is not of the expected type.", "def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatisticsList): self._statistics = stats_proto self._slice_map = {} # type: Dict[str,", "{ 'Mutual Information': 0.5, 'Correlation': 0.1 }, FeaturePath(('feature_2',)): { 'Mutual Information': 0.8, 'Correlation':", "provided io_provider. input_paths: A list of file paths of files containing sharded DatasetFeatureStatisticsList", "its inputs. Args: deriver_name: The name of a deriver. Matches validation_derived_source deriver_name. source_paths:", "ValueError('Duplicate slice name %s' % dataset.name) self._slice_map[dataset.name] = DatasetView(dataset) self._initialized = True def", "def _normalize_feature_id( name_or_path_or_steps: Union[str, types.FeaturePath, Iterable[str]] ) -> types.FeaturePath: if isinstance(name_or_path_or_steps, str): return", "as e: raise e def get_feature_stats(stats: statistics_pb2.DatasetFeatureStatistics, feature_path: types.FeaturePath ) -> statistics_pb2.FeatureNameStatistics: \"\"\"Get", "protocol buffer. custom_stats_name: The name of the custom statistics to obtain from the", "stats_proto self._initialized = False def _init_index(self): \"\"\"Initializes internal indices. Noop if already initialized.\"\"\"", "methods for retrieving first-party custom # statistics (e.g., MI, NLP). def numeric_statistics(self) ->", "FeatureView of derived feature. Raises: ValueError if multiple derived features match. \"\"\" #", "Optional[statistics_pb2.NumericStatistics]: \"\"\"Retrieve numeric statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'num_stats': return self._statistics.num_stats return", "path. Returns: A FeatureView, or None if feature_id is not present. 
\"\"\" feature_id", "raise ValueError('Duplicate feature %s' % feature_id) self._feature_map[feature_id] = j for j, cross_feature in", "# Lazily initialize in case we don't need an index. if self._initialized: return", "stats_proto: statistics_pb2.FeatureNameStatistics): self._statistics = stats_proto def proto(self) -> statistics_pb2.FeatureNameStatistics: \"\"\"Retrieve the underlying proto.\"\"\"", "the dataset statistics. Returns: A FeatureNameStatistics protocol buffer. Raises: TypeError: If the input", "% len(results)) if len(results) == 1: return results.pop() return None class FeatureView(object): \"\"\"View", "available.\"\"\" which = self._statistics.WhichOneof('stats') if which == 'num_stats': return self._statistics.num_stats.common_stats if which ==", "Lazily initialize in case we don't need an index. if self._initialized: return for", "# type: Dict[str, DatasetView] self._initialized = False def _init_index(self): \"\"\"Initializes internal mappings.\"\"\" #", "types.FeaturePath, Iterable[str]] ) -> Optional['CrossFeatureView']: \"\"\"Retrieve a cross-feature if it exists, or None.\"\"\"", "for stat_name in stat_names: result.custom_stats.add(name=stat_name, num=stats_values[stat_name]) return result def write_stats_text(stats: statistics_pb2.DatasetFeatureStatisticsList, output_path: Text)", "paths, and values are Dicts with keys denoting name of the custom statistic", "custom_stats_name: The name of the custom statistics to obtain from the feature statistics", "expects a single record.') except StopIteration: return result except Exception as e: raise", "Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data statistics proto from file. Args: input_path: Data statistics", "_normalize_feature_id(feature_id) self._init_index() index = self._feature_map.get(feature_id, None) if index is None: return None return", "%s, should be a ' 'FeatureNameStatistics proto.' 
% type(feature_stats).__name__) for custom_stats in feature_stats.custom_stats:", "validation_derived_source.source_path. Returns: FeatureView of derived feature. Raises: ValueError if multiple derived features match.", "(slices).\"\"\" def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatisticsList): self._statistics = stats_proto self._slice_map = {} # type:", "the feature statistics. \"\"\" if not isinstance(feature_stats, statistics_pb2.FeatureNameStatistics): raise TypeError('feature_stats is of type", "statistics_pb2.DatasetFeatureStatistics: \"\"\"Builds DatasetFeatureStatistics proto with custom stats from input dict. Args: stats_values: A", "use this file except in compliance with the License. # You may obtain", "the feature. Returns: A FeatureNameStatistic proto containing the custom statistics for a feature.", "j, feature in enumerate(self._statistics.features): if field_identifier is None: field_identifier = feature.WhichOneof('field_id') elif feature.WhichOneof('field_id')", "to a file in text format. Args: stats: A DatasetFeatureStatisticsList proto. output_path: File", "%d features matched' % len(results)) if len(results) == 1: return results.pop() return None", "language governing permissions and # limitations under the License. \"\"\"Utilities for stats generators.\"\"\"", "statistic for the feature. Ex. { FeaturePath(('feature_1',)): { 'Mutual Information': 0.5, 'Correlation': 0.1", "feature identifiers.\"\"\" self._init_index() return self._feature_map.keys() def list_cross_features( self) -> Iterable[Tuple[types.FeaturePath, types.FeaturePath]]: \"\"\"Lists cross-feature", "Slice key of the slice. Returns: Statistics of the specific slice. Raises: ValueError:", "-> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads the specified DatasetFeatureStatisticsList proto stored in text format. Args: input_path:", "value is the numeric value of the custom statistic of that feature. 
Ex.", "False def _init_index(self): \"\"\"Initializes internal indices. Noop if already initialized.\"\"\" if self._initialized: return", "features matched' % len(results)) if len(results) == 1: return results.pop() return None class", "\"\"\"Retrieve struct statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'struct_stats': return self._statistics.struct_stats return None", "the custom statistic is not found in the feature statistics. \"\"\" if not", "%s did not look like a TFRecord. Try reading as a plain '", "if self._statistics.WhichOneof('stats') == 'string_stats': return self._statistics.string_stats return None def bytes_statistics(self) -> Optional[statistics_pb2.BytesStatistics]: \"\"\"Retrieve", "%s not found in the feature statistics.' % custom_stats_name) def get_slice_stats( stats: statistics_pb2.DatasetFeatureStatisticsList,", "None: continue if feature.validation_derived_source.deriver_name != deriver_name: continue if (len(source_paths) != len( feature.validation_derived_source.source_path)): continue", "get_default_slice(self) -> Optional['DatasetView']: self._init_index() if len(self._slice_map) == 1: for _, v in self._slice_map.items():", "proto containing the custom statistics for each feature in the dataset. \"\"\" result", "TFRecord file or a plain file containing the statistics proto in Proto Text", "accessors should be used in place of proto access (for example, x.numeric_statistics() instead", "proto.\"\"\" return self._statistics def get_feature( self, feature_id: Union[str, types.FeaturePath, Iterable[str]] ) -> Optional['FeatureView']:", "slice statistics. \"\"\" for slice_stats in stats.datasets: if slice_stats.name == slice_key: result =", "specified slice statistics. \"\"\" for slice_stats in stats.datasets: if slice_stats.name == slice_key: result", ") -> Optional['FeatureView']: \"\"\"Retrieve a feature if it exists. 
Features specified within the", "TypeError: If the input statistics is not of the expected type. ValueError: If", "of file paths of files containing sharded DatasetFeatureStatisticsList protos. io_provider: Optional StatisticsIOProvider. If", "y_path = _normalize_feature_id(y_path) self._init_index() feature_id = (x_path, y_path) index = self._cross_feature_map.get(feature_id, None) if", "protocol buffer. feature_path: The path of the feature whose statistics to obtain from", "% dataset.name) self._slice_map[dataset.name] = DatasetView(dataset) self._initialized = True def proto(self) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Retrieve", "if performance becomes an issue. results = [] for feature in self.proto().features: if", "from tfx_bsl import statistics from google.protobuf import text_format from tensorflow_metadata.proto.v0 import statistics_pb2 _NP_DTYPE_KIND_TO_FEATURE_TYPE", "disk as a DatasetListView. Args: input_path_prefix: If passed, loads files starting with this", "A DatasetFeatureStatisticsList protocol buffer. slice_key: Slice key of the slice. Returns: Statistics of", "= io_provider.record_iterator_impl(input_paths) for stats_list in stats_iter: for dataset in stats_list.datasets: acc.MergeDatasetFeatureStatistics(dataset.SerializeToString()) stats =", "types.FeaturePath, Iterable[str] consisting of path steps, or a str, which is converted to", "a length one path. Returns: A FeatureView, or None if feature_id is not", "def _init_index(self): \"\"\"Initializes internal mappings.\"\"\" # Lazily initialize in case we don't need", "on a deriver name and its inputs. Args: deriver_name: The name of a", "== input_paths is None: raise ValueError('Must provide one of input_paths_prefix, input_paths.') if io_provider", "are Dicts with keys denoting name of the custom statistic and values denoting", "input path does not exist. 
\"\"\" if not tf.io.gfile.exists(input_path): raise IOError('Invalid input path", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "is None: return None return CrossFeatureView(self._statistics.cross_features[index]) def list_features(self) -> Iterable[types.FeaturePath]: \"\"\"Lists feature identifiers.\"\"\"", "the underlying proto.\"\"\" return self._statistics def get_slice(self, slice_key: str) -> Optional['DatasetView']: self._init_index() return", "\"\"\" if not isinstance(feature_stats, statistics_pb2.FeatureNameStatistics): raise TypeError('feature_stats is of type %s, should be", "statistics_pb2 _NP_DTYPE_KIND_TO_FEATURE_TYPE = { 'f': statistics_pb2.FeatureNameStatistics.FLOAT, 'i': statistics_pb2.FeatureNameStatistics.INT, 'u': statistics_pb2.FeatureNameStatistics.INT, 'S': statistics_pb2.FeatureNameStatistics.STRING, 'O':", "% type(stats).__name__) stats_proto_text = text_format.MessageToString(stats) io_util.write_string_to_file(output_path, stats_proto_text) def load_stats_text( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList:", "feature statistics from the dataset statistics. Args: stats: A DatasetFeatureStatistics protocol buffer. feature_path:", "# TODO(b/221453427): Consider indexing if performance becomes an issue. 
results = [] for", "the underlying proto.\"\"\" return self._statistics def custom_statistic(self, name: str) -> Optional[statistics_pb2.CustomStatistic]: \"\"\"Retrieve a", "dict is the name of the custom statistic and the value is the", "stats_proto: statistics_pb2.CrossFeatureStatistics): self._statistics = stats_proto def proto(self) -> statistics_pb2.CrossFeatureStatistics: \"\"\"Retrieve the underlying proto.\"\"\"", "has unsupported arrow type: {}'.format( feature_path, arrow_type)) def make_dataset_feature_stats_proto( stats_values: Dict[types.FeaturePath, Dict[Text, float]]", "= 'domain_info' # LINT.ThenChange(../anomalies/custom_domain_util.cc) def maybe_get_utf8(value: bytes) -> Optional[Text]: \"\"\"Returns the value decoded", "= stats_proto def proto(self) -> statistics_pb2.FeatureNameStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def", "if it cannot be decoded. Args: value: The bytes value to decode. Returns:", "False def _init_index(self): \"\"\"Initializes internal mappings.\"\"\" # Lazily initialize in case we don't", "proto(self) -> statistics_pb2.CrossFeatureStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def load_sharded_statistics( input_path_prefix: Optional[str]", "import tensorflow as tf from tensorflow_data_validation import constants from tensorflow_data_validation import types from", "StopIteration: return result except Exception as e: raise e def get_feature_stats(stats: statistics_pb2.DatasetFeatureStatistics, feature_path:", "DatasetFeatureStatisticsList proto. 
Raises: TypeError: If the input proto is not of the expected", "stats_proto_text) def load_stats_text( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads the specified DatasetFeatureStatisticsList proto stored", "Iterable[Tuple[types.FeaturePath, types.FeaturePath]]: \"\"\"Lists cross-feature identifiers.\"\"\" self._init_index() return self._cross_feature_map.keys() def get_derived_feature( self, deriver_name: str,", "= False def _init_index(self): \"\"\"Initializes internal indices. Noop if already initialized.\"\"\" if self._initialized:", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "= True for i in range(len(source_paths)): if (source_paths[i] != types.FeaturePath.from_proto( feature.validation_derived_source.source_path[i])): all_match =", "stats_proto self._slice_map = {} # type: Dict[str, DatasetView] self._initialized = False def _init_index(self):", "proto by name (instead of path) are normalized to a length 1 path,", "If the input statistics is not of the expected type. ValueError: If the", "None) def get_default_slice(self) -> Optional['DatasetView']: self._init_index() if len(self._slice_map) == 1: for _, v", "converted to a length one path. Returns: A FeatureView, or None if feature_id", "A list of file paths of files containing sharded DatasetFeatureStatisticsList protos. io_provider: Optional", "input_paths: Optional[Iterable[str]] = None, io_provider: Optional[statistics_io_impl.StatisticsIOProvider] = None ) -> DatasetListView: \"\"\"Read a", "feature. arrow_type: Arrow DataType. Returns: A statistics_pb2.FeatureNameStatistics.Type value or None if arrow_type is", "statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads the specified DatasetFeatureStatisticsList proto stored in text format. 
Args: input_path: File", "stats_proto def load_stats_binary( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads a serialized DatasetFeatureStatisticsList proto from", "single feature. This class provides accessor methods, as well as access to the", "the name of the custom statistic and the value is the numeric value", "be a ' 'FeatureNameStatistics proto.' % type(feature_stats).__name__) for custom_stats in feature_stats.custom_stats: if custom_stats.name", "# statistics (e.g., MI, NLP). def numeric_statistics(self) -> Optional[statistics_pb2.NumericStatistics]: \"\"\"Retrieve numeric statistics if", "return None class CrossFeatureView(object): \"\"\"View of a single cross feature.\"\"\" def __init__(self, stats_proto:", "does not exist. \"\"\" if not tf.io.gfile.exists(input_path): raise IOError('Invalid input path {}.'.format(input_path)) try:", "example, x.numeric_statistics() instead of x.proto().num_stats) in order to support future extension of the", "with the License. # You may obtain a copy of the License at", "a str, which is converted to a length one path. Returns: A FeatureView,", "by name (instead of path) are normalized to a length 1 path, and", "return None if not arrow_util.is_list_like(arrow_type): raise TypeError('Expected feature column to be a '", "in self._cross_feature_map: raise ValueError('Duplicate feature %s' % feature_id) self._cross_feature_map[feature_id] = j self._initialized =", "or a str, which is converted to a length one path. 
Returns: A", "# LINT.IfChange # Semantic domain information can be passed to schema inference using", "FeatureView(self._statistics.features[index]) def get_cross_feature( self, x_path: Union[str, types.FeaturePath, Iterable[str]], y_path: Union[str, types.FeaturePath, Iterable[str]] )", "self._slice_map.get(constants.DEFAULT_SLICE_KEY, None) def list_slices(self) -> Iterable[str]: self._init_index() return self._slice_map.keys() class DatasetView(object): \"\"\"View of", "not of the expected type. \"\"\" if not isinstance(stats, statistics_pb2.DatasetFeatureStatisticsList): raise TypeError( 'stats", "found in the dataset statistics. \"\"\" if not isinstance(stats, statistics_pb2.DatasetFeatureStatistics): raise TypeError('statistics is", "-> Iterable[types.FeaturePath]: \"\"\"Lists feature identifiers.\"\"\" self._init_index() return self._feature_map.keys() def list_cross_features( self) -> Iterable[Tuple[types.FeaturePath,", "The path of the feature. Returns: A FeatureNameStatistic proto containing the custom statistics", "def load_stats_binary( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads a serialized DatasetFeatureStatisticsList proto from a", "io_provider is None: io_provider = statistics_io_impl.get_io_provider() if input_path_prefix is not None: input_paths =", "law or agreed to in writing, software # distributed under the License is", "found in the dataset statistics.' % feature_path) def get_custom_stats( feature_stats: statistics_pb2.FeatureNameStatistics, custom_stats_name: Text", "expected type. ValueError: If the custom statistic is not found in the feature", "statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def get_slice(self, slice_key: str) -> Optional['DatasetView']:", "is not found in the dataset statistics. \"\"\" if not isinstance(stats, statistics_pb2.DatasetFeatureStatistics): raise", "feature statistics. 
\"\"\" if not isinstance(feature_stats, statistics_pb2.FeatureNameStatistics): raise TypeError('feature_stats is of type %s,", "except UnicodeError: return None return decoded_value def get_feature_type( dtype: np.dtype) -> Optional[types.FeatureNameStatisticsType]: \"\"\"Get", "statistics_pb2.DatasetFeatureStatisticsList() stats_text = io_util.read_file_to_string(input_path) text_format.Parse(stats_text, stats_proto) return stats_proto def load_stats_binary( input_path: Text) ->", "return self._slice_map.get(slice_key, None) def get_default_slice(self) -> Optional['DatasetView']: self._init_index() if len(self._slice_map) == 1: for", "feature statistics.' % custom_stats_name) def get_slice_stats( stats: statistics_pb2.DatasetFeatureStatisticsList, slice_key: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Get", "continue if feature.validation_derived_source.deriver_name != deriver_name: continue if (len(source_paths) != len( feature.validation_derived_source.source_path)): continue all_match", "result = stat else: raise ValueError('Duplicate custom_stats for name %s' % name) return", "underlying proto.\"\"\" return self._statistics def get_feature( self, feature_id: Union[str, types.FeaturePath, Iterable[str]] ) ->", "a ' 'DatasetFeatureStatistics proto.' % type(stats).__name__) for feature_stats in stats.features: if feature_path ==", "Returns: A FeatureView, or None if feature_id is not present. \"\"\" feature_id =", "if which == 'bytes_stats': return self._statistics.bytes_stats.common_stats if which == 'struct_stats': return self._statistics.struct_stats.common_stats return", "already initialized.\"\"\" if self._initialized: return field_identifier = None for j, feature in enumerate(self._statistics.features):", "must be specified with either path or name within a' ' Dataset.') if", "file path. The file should be a one-record TFRecord file or a plain", "input_path: File path from which to load the DatasetFeatureStatisticsList proto. 
Returns: A DatasetFeatureStatisticsList", "sharded DatasetFeatureStatisticsList from disk as a DatasetListView. Args: input_path_prefix: If passed, loads files", "within a' ' Dataset.') if field_identifier == 'name': feature_id = types.FeaturePath([feature.name]) else: feature_id", "be specified with either path or name within a' ' Dataset.') if field_identifier", "import numpy as np import pyarrow as pa import tensorflow as tf from", "decoded. \"\"\" try: decoded_value = value.decode('utf-8') except UnicodeError: return None return decoded_value def", "%s, should be a ' 'DatasetFeatureStatisticsList proto.' % type(stats).__name__) stats_proto_text = text_format.MessageToString(stats) io_util.write_string_to_file(output_path,", "A DatasetFeatureStatisticsList proto. \"\"\" it = statistics_io_impl.get_io_provider('tfrecords').record_iterator_impl( [input_path]) result = next(it) try: next(it)", "for j, feature in enumerate(self._statistics.features): if field_identifier is None: field_identifier = feature.WhichOneof('field_id') elif", "if io_provider is None: io_provider = statistics_io_impl.get_io_provider() if input_path_prefix is not None: input_paths", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "None ) -> DatasetListView: \"\"\"Read a sharded DatasetFeatureStatisticsList from disk as a DatasetListView.", "== 'bytes_stats': return self._statistics.bytes_stats.common_stats if which == 'struct_stats': return self._statistics.struct_stats.common_stats return None class", "now). Raises: TypeError: if the type is not supported. \"\"\" if pa.types.is_null(arrow_type): return", "did not look like a TFRecord. Try reading as a plain ' 'file.',", "decoded as utf-8, or None, if the value cannot be decoded. 
\"\"\" try:", "else: raise ValueError('Duplicate custom_stats for name %s' % name) return result # TODO(b/202910677):", "type: Dict[types.FeaturePath, int] self._cross_feature_map = { } # type: Dict[Tuple[types.FeaturePath, types.FeaturePath], int] self._statistics", "> 1: raise ValueError('Ambiguous result, %d features matched' % len(results)) if len(results) ==", "Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature type from Arrow type. Args: feature_path: path of the feature.", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "return types.FeaturePath(name_or_path_or_steps) class DatasetListView(object): \"\"\"View of statistics for multiple datasets (slices).\"\"\" def __init__(self,", "stats_proto def proto(self) -> statistics_pb2.CrossFeatureStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def load_sharded_statistics(", "v in self._slice_map.items(): return v return self._slice_map.get(constants.DEFAULT_SLICE_KEY, None) def list_slices(self) -> Iterable[str]: self._init_index()", "of path steps, or a str, which is converted to a length one", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "not found in the dataset statistics.' % feature_path) def get_custom_stats( feature_stats: statistics_pb2.FeatureNameStatistics, custom_stats_name:", "Matches validation_derived_source deriver_name. source_paths: Source paths for derived features. Matches validation_derived_source.source_path. Returns: FeatureView", "division from __future__ import print_function import logging from typing import Dict, Iterable, Optional,", "elif pa.types.is_struct(value_type): return statistics_pb2.FeatureNameStatistics.STRUCT elif pa.types.is_null(value_type): return None raise TypeError('Feature {} has unsupported", "provides accessor methods, as well as access to the underlying proto. Where possible,", "derived features. Matches validation_derived_source.source_path. 
Returns: FeatureView of derived feature. Raises: ValueError if multiple", "output_path: File path to write the DatasetFeatureStatisticsList proto. Raises: TypeError: If the input", "The path of the feature whose statistics to obtain from the dataset statistics.", "the expected type. ValueError: If the input feature is not found in the", "text_format.Parse(stats_text, stats_proto) return stats_proto def load_stats_binary( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads a serialized", "__init__(self, stats_proto: statistics_pb2.FeatureNameStatistics): self._statistics = stats_proto def proto(self) -> statistics_pb2.FeatureNameStatistics: \"\"\"Retrieve the underlying", "True def proto(self) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def get_slice(self,", "raise TypeError('Expected feature column to be a ' '(Large)List<primitive|struct> or null, but feature", "types.FeaturePath: if isinstance(name_or_path_or_steps, str): return types.FeaturePath([name_or_path_or_steps]) if isinstance(name_or_path_or_steps, types.FeaturePath): return name_or_path_or_steps return types.FeaturePath(name_or_path_or_steps)", "a one-record TFRecord file or a plain file containing the statistics proto in", "the statistics proto in Proto Text Format. Returns: A DatasetFeatureStatisticsList proto. Raises: IOError:", "self._slice_map = {} # type: Dict[str, DatasetView] self._initialized = False def _init_index(self): \"\"\"Initializes", "\"\"\"Get custom statistics from the feature statistics. Args: feature_stats: A FeatureNameStatistics protocol buffer.", "'string_stats': return self._statistics.string_stats return None def bytes_statistics(self) -> Optional[statistics_pb2.BytesStatistics]: \"\"\"Retrieve byte statistics if", "self._slice_map.keys() class DatasetView(object): \"\"\"View of statistics for a dataset (slice).\"\"\" def __init__(self, stats_proto:", "file. 
Args: input_path: Data statistics file path. Returns: A DatasetFeatureStatisticsList proto. \"\"\" it", "statistics_pb2.FeatureNameStatistics.INT, 'u': statistics_pb2.FeatureNameStatistics.INT, 'S': statistics_pb2.FeatureNameStatistics.STRING, 'O': statistics_pb2.FeatureNameStatistics.STRING, 'U': statistics_pb2.FeatureNameStatistics.STRING, } # LINT.IfChange #", "\"\"\"Loads the specified DatasetFeatureStatisticsList proto stored in text format. Args: input_path: File path", "\"\"\" feature_id = _normalize_feature_id(feature_id) self._init_index() index = self._feature_map.get(feature_id, None) if index is None:", "of the proto. \"\"\" def __init__(self, stats_proto: statistics_pb2.FeatureNameStatistics): self._statistics = stats_proto def proto(self)", "= text_format.MessageToString(stats) io_util.write_string_to_file(output_path, stats_proto_text) def load_stats_text( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads the specified", "def proto(self) -> statistics_pb2.DatasetFeatureStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def get_feature( self,", "Sequence[types.FeaturePath]) -> Optional['FeatureView']: \"\"\"Retrieve a derived feature based on a deriver name and", "if pa.types.is_integer(value_type): return statistics_pb2.FeatureNameStatistics.INT elif pa.types.is_floating(value_type): return statistics_pb2.FeatureNameStatistics.FLOAT elif arrow_util.is_binary_like(value_type): return statistics_pb2.FeatureNameStatistics.STRING elif", "{ FeaturePath(('feature_1',)): { 'Mutual Information': 0.5, 'Correlation': 0.1 }, FeaturePath(('feature_2',)): { 'Mutual Information':", "statistics_pb2.DatasetFeatureStatisticsList() stats_proto.ParseFromString(io_util.read_file_to_string( input_path, binary_mode=True)) return stats_proto def load_stats_tfrecord( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads", "pattern corresponding to the output of the provided io_provider. 
input_paths: A list of", "{}.'.format(feature_path, arrow_type)) value_type = arrow_util.get_innermost_nested_type(arrow_type) if pa.types.is_integer(value_type): return statistics_pb2.FeatureNameStatistics.INT elif pa.types.is_floating(value_type): return statistics_pb2.FeatureNameStatistics.FLOAT", "import Dict, Iterable, Optional, Sequence, Text, Tuple, Union import numpy as np import", "% type(stats).__name__) for feature_stats in stats.features: if feature_path == types.FeaturePath.from_proto(feature_stats.path): return feature_stats raise", "type. ValueError: If the custom statistic is not found in the feature statistics.", "a DatasetFeatureStatisticsList proto to a file in text format. Args: stats: A DatasetFeatureStatisticsList", "= { } # type: Dict[Tuple[types.FeaturePath, types.FeaturePath], int] self._statistics = stats_proto self._initialized =", "in enumerate(self._statistics.cross_features): feature_id = (types.FeaturePath.from_proto(cross_feature.path_x), types.FeaturePath.from_proto(cross_feature.path_y)) if feature_id in self._cross_feature_map: raise ValueError('Duplicate feature", "in text format. Args: stats: A DatasetFeatureStatisticsList proto. output_path: File path to write", "statistics to obtain from the dataset statistics. Returns: A FeatureNameStatistics protocol buffer. Raises:", "Add convenience methods for retrieving first-party custom # statistics (e.g., MI, NLP). def", "from file. Args: input_path: Data statistics file path. 
The file should be a", "= stats_proto def proto(self) -> statistics_pb2.CrossFeatureStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def", "Dict[types.FeaturePath, Dict[Text, float]] ) -> statistics_pb2.DatasetFeatureStatistics: \"\"\"Builds DatasetFeatureStatistics proto with custom stats from", "statistics_pb2.DatasetFeatureStatisticsList): raise TypeError( 'stats is of type %s, should be a ' 'DatasetFeatureStatisticsList", "proto.\"\"\" return self._statistics def custom_statistic(self, name: str) -> Optional[statistics_pb2.CustomStatistic]: \"\"\"Retrieve a custom_statistic by", "None for j, feature in enumerate(self._statistics.features): if field_identifier is None: field_identifier = feature.WhichOneof('field_id')", "the feature. arrow_type: Arrow DataType. Returns: A statistics_pb2.FeatureNameStatistics.Type value or None if arrow_type", "= j for j, cross_feature in enumerate(self._statistics.cross_features): feature_id = (types.FeaturePath.from_proto(cross_feature.path_x), types.FeaturePath.from_proto(cross_feature.path_y)) if feature_id", "None, if the value cannot be decoded. \"\"\" try: decoded_value = value.decode('utf-8') except", "types.FeaturePath.from_proto(cross_feature.path_y)) if feature_id in self._cross_feature_map: raise ValueError('Duplicate feature %s' % feature_id) self._cross_feature_map[feature_id] =", "sorted(stats_values.keys()) for feature_path in feature_paths: feature_stats_proto = _make_feature_stats_proto(stats_values[feature_path], feature_path) new_feature_stats_proto = result.features.add() new_feature_stats_proto.CopyFrom(feature_stats_proto)", "stats_proto: statistics_pb2.DatasetFeatureStatisticsList): self._statistics = stats_proto self._slice_map = {} # type: Dict[str, DatasetView] self._initialized", "!= field_identifier: raise ValueError( 'Features must be specified with either path or name", "decode. 
Returns: The value decoded as utf-8, or None, if the value cannot", "\"\"\" it = statistics_io_impl.get_io_provider('tfrecords').record_iterator_impl( [input_path]) result = next(it) try: next(it) raise ValueError('load_stats_tfrecord expects", "be decoded. Args: value: The bytes value to decode. Returns: The value decoded", "the numeric value of the custom statistic of that feature. Ex. { 'Mutual", "by feature name to have deterministic ordering feature_paths = sorted(stats_values.keys()) for feature_path in", "# Sort alphabetically by statistic name to have deterministic ordering stat_names = sorted(stats_values.keys())", "proto(self) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def get_slice(self, slice_key: str)", "self._slice_map.items(): return v return self._slice_map.get(constants.DEFAULT_SLICE_KEY, None) def list_slices(self) -> Iterable[str]: self._init_index() return self._slice_map.keys()", "feature_path) def get_custom_stats( feature_stats: statistics_pb2.FeatureNameStatistics, custom_stats_name: Text ) -> Union[float, Text, statistics_pb2.Histogram, statistics_pb2.RankHistogram]:", "# Semantic domain information can be passed to schema inference using a #", "elif arrow_util.is_binary_like(value_type): return statistics_pb2.FeatureNameStatistics.STRING elif pa.types.is_struct(value_type): return statistics_pb2.FeatureNameStatistics.STRUCT elif pa.types.is_null(value_type): return None raise", "the custom statistic and the value is the numeric value of the custom", "statistics.DatasetListAccumulator() stats_iter = io_provider.record_iterator_impl(input_paths) for stats_list in stats_iter: for dataset in stats_list.datasets: acc.MergeDatasetFeatureStatistics(dataset.SerializeToString())", "ValueError('Invalid slice key.') def load_statistics( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data statistics proto", "# Sort alphabetically by feature name to 
have deterministic ordering feature_paths = sorted(stats_values.keys())", "Union[float, Text, statistics_pb2.Histogram, statistics_pb2.RankHistogram]: \"\"\"Get custom statistics from the feature statistics. Args: feature_stats:", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "constants from tensorflow_data_validation import types from tensorflow_data_validation.arrow import arrow_util from tensorflow_data_validation.utils import statistics_io_impl", "from the dataset statistics. Args: stats: A DatasetFeatureStatistics protocol buffer. feature_path: The path", "None return FeatureView(self._statistics.features[index]) def get_cross_feature( self, x_path: Union[str, types.FeaturePath, Iterable[str]], y_path: Union[str, types.FeaturePath,", "TypeError: If the input feature statistics is not of the expected type. ValueError:", "field_identifier is None: field_identifier = feature.WhichOneof('field_id') elif feature.WhichOneof('field_id') != field_identifier: raise ValueError( 'Features", "file containing the statistics proto in Proto Text Format. Returns: A DatasetFeatureStatisticsList proto.", "-> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data statistics proto from file. Args: input_path: Data statistics file", "arrow_type: pa.DataType) -> Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature type from Arrow type. Args: feature_path: path", "feature name to have deterministic ordering feature_paths = sorted(stats_values.keys()) for feature_path in feature_paths:", "that feature. Ex. { 'Mutual Information': 0.5, 'Correlation': 0.1 } feature_path: The path", "self._feature_map.get(feature_id, None) if index is None: return None return FeatureView(self._statistics.features[index]) def get_cross_feature( self,", "Returns: The custom statistic. Raises: TypeError: If the input feature statistics is not", "class FeatureView(object): \"\"\"View of a single feature. This class provides accessor methods, as", "present. 
\"\"\" feature_id = _normalize_feature_id(feature_id) self._init_index() index = self._feature_map.get(feature_id, None) if index is", "Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature type from numpy dtype. Args: dtype: Numpy dtype. Returns: A", "return decoded_value def get_feature_type( dtype: np.dtype) -> Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature type from numpy", "1: raise ValueError('Ambiguous result, %d features matched' % len(results)) if len(results) == 1:", "= sorted(stats_values.keys()) for feature_path in feature_paths: feature_stats_proto = _make_feature_stats_proto(stats_values[feature_path], feature_path) new_feature_stats_proto = result.features.add()", "feature_path) new_feature_stats_proto = result.features.add() new_feature_stats_proto.CopyFrom(feature_stats_proto) return result def _make_feature_stats_proto( stats_values: Dict[Text, float], feature_path:", "Dict, Iterable, Optional, Sequence, Text, Tuple, Union import numpy as np import pyarrow", ") -> statistics_pb2.DatasetFeatureStatistics: \"\"\"Builds DatasetFeatureStatistics proto with custom stats from input dict. Args:", "return results.pop() return None class FeatureView(object): \"\"\"View of a single feature. This class", "type %s, should be a ' 'DatasetFeatureStatisticsList proto.' % type(stats).__name__) stats_proto_text = text_format.MessageToString(stats)", "this file except in compliance with the License. 
# You may obtain a", "input_paths = io_provider.glob(input_path_prefix) acc = statistics.DatasetListAccumulator() stats_iter = io_provider.record_iterator_impl(input_paths) for stats_list in stats_iter:", "it exists, or None.\"\"\" x_path = _normalize_feature_id(x_path) y_path = _normalize_feature_id(y_path) self._init_index() feature_id =", "custom_stats_name: Text ) -> Union[float, Text, statistics_pb2.Histogram, statistics_pb2.RankHistogram]: \"\"\"Get custom statistics from the", "Union[str, types.FeaturePath, Iterable[str]] ) -> Optional['CrossFeatureView']: \"\"\"Retrieve a cross-feature if it exists, or", "{} # type: Dict[str, DatasetView] self._initialized = False def _init_index(self): \"\"\"Initializes internal mappings.\"\"\"", "isinstance(stats, statistics_pb2.DatasetFeatureStatistics): raise TypeError('statistics is of type %s, should be a ' 'DatasetFeatureStatistics", "DatasetFeatureStatistics protocol buffer. feature_path: The path of the feature whose statistics to obtain", "convenience methods for retrieving first-party custom # statistics (e.g., MI, NLP). def numeric_statistics(self)", "Consider indexing if performance becomes an issue. results = [] for feature in", "not None: input_paths = io_provider.glob(input_path_prefix) acc = statistics.DatasetListAccumulator() stats_iter = io_provider.record_iterator_impl(input_paths) for stats_list", "proto from file. Args: input_path: Data statistics file path. The file should be", "class CrossFeatureView(object): \"\"\"View of a single cross feature.\"\"\" def __init__(self, stats_proto: statistics_pb2.CrossFeatureStatistics): self._statistics", "statistics. Args: stats: A DatasetFeatureStatistics protocol buffer. feature_path: The path of the feature", "if available.\"\"\" if self._statistics.WhichOneof('stats') == 'string_stats': return self._statistics.string_stats return None def bytes_statistics(self) ->", "name of a deriver. Matches validation_derived_source deriver_name. 
source_paths: Source paths for derived features.", "1: for _, v in self._slice_map.items(): return v return self._slice_map.get(constants.DEFAULT_SLICE_KEY, None) def list_slices(self)", "DatasetFeatureStatisticsList from disk as a DatasetListView. Args: input_path_prefix: If passed, loads files starting", "arrow_type is null (which means it cannot be determined for now). Raises: TypeError:", "\"\"\"Get statistics associated with a specific slice. Args: stats: A DatasetFeatureStatisticsList protocol buffer.", "Optional['DatasetView']: self._init_index() return self._slice_map.get(slice_key, None) def get_default_slice(self) -> Optional['DatasetView']: self._init_index() if len(self._slice_map) ==", "= types.FeaturePath.from_proto(feature.path) if feature_id in self._feature_map: raise ValueError('Duplicate feature %s' % feature_id) self._feature_map[feature_id]", "feature.validation_derived_source is None: continue if feature.validation_derived_source.deriver_name != deriver_name: continue if (len(source_paths) != len(", "self._init_index() if len(self._slice_map) == 1: for _, v in self._slice_map.items(): return v return", "return statistics_pb2.FeatureNameStatistics.STRUCT elif pa.types.is_null(value_type): return None raise TypeError('Feature {} has unsupported arrow type:", "[input_path]) result = next(it) try: next(it) raise ValueError('load_stats_tfrecord expects a single record.') except", "Raises: ValueError: If the input statistics proto does not have the specified slice", "0.8, 'Correlation': 0.6 } } Returns: DatasetFeatureStatistics proto containing the custom statistics for", "under the License. \"\"\"Utilities for stats generators.\"\"\" from __future__ import absolute_import from __future__", "Args: dtype: Numpy dtype. Returns: A statistics_pb2.FeatureNameStatistics.Type value. 
\"\"\" return _NP_DTYPE_KIND_TO_FEATURE_TYPE.get(dtype.kind) def get_feature_type_from_arrow_type(", "values are Dicts with keys denoting name of the custom statistic and values", "DatasetFeatureStatisticsList proto. output_path: File path to write the DatasetFeatureStatisticsList proto. Raises: TypeError: If", "TypeError('feature_stats is of type %s, should be a ' 'FeatureNameStatistics proto.' % type(feature_stats).__name__)", "self._initialized = False def _init_index(self): \"\"\"Initializes internal indices. Noop if already initialized.\"\"\" if", "result = statistics_pb2.DatasetFeatureStatistics() # Sort alphabetically by feature name to have deterministic ordering", "self._statistics = stats_proto def proto(self) -> statistics_pb2.FeatureNameStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics", "to obtain from the dataset statistics. Returns: A FeatureNameStatistics protocol buffer. Raises: TypeError:", "arrow_util.is_list_like(arrow_type): raise TypeError('Expected feature column to be a ' '(Large)List<primitive|struct> or null, but", "== 'string_stats': return self._statistics.string_stats.common_stats if which == 'bytes_stats': return self._statistics.bytes_stats.common_stats if which ==", "decoded_value def get_feature_type( dtype: np.dtype) -> Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature type from numpy dtype.", "of type %s, should be a ' 'FeatureNameStatistics proto.' % type(feature_stats).__name__) for custom_stats", "data statistics proto from file. Args: input_path: Data statistics file path. The file", "FeatureNameStatistics protocol buffer. 
custom_stats_name: The name of the custom statistics to obtain from", "__init__(self, stats_proto: statistics_pb2.DatasetFeatureStatistics): self._feature_map = {} # type: Dict[types.FeaturePath, int] self._cross_feature_map = {", "= statistics_pb2.FeatureNameStatistics() result.path.CopyFrom(feature_path.to_proto()) # Sort alphabetically by statistic name to have deterministic ordering", "Args: input_path: Data statistics file path. Returns: A DatasetFeatureStatisticsList proto. \"\"\" it =", "raise IOError('Invalid input path {}.'.format(input_path)) try: return load_stats_tfrecord(input_path) except Exception: # pylint: disable=broad-except", "path steps, or a str, which is converted to a length one path.", "supported. \"\"\" if pa.types.is_null(arrow_type): return None if not arrow_util.is_list_like(arrow_type): raise TypeError('Expected feature column", "TypeError( 'stats is of type %s, should be a ' 'DatasetFeatureStatisticsList proto.' %", "io_provider.record_iterator_impl(input_paths) for stats_list in stats_iter: for dataset in stats_list.datasets: acc.MergeDatasetFeatureStatistics(dataset.SerializeToString()) stats = statistics_pb2.DatasetFeatureStatisticsList()", "feature in enumerate(self._statistics.features): if field_identifier is None: field_identifier = feature.WhichOneof('field_id') elif feature.WhichOneof('field_id') !=", "feature type from Arrow type. Args: feature_path: path of the feature. arrow_type: Arrow", "raise e def get_feature_stats(stats: statistics_pb2.DatasetFeatureStatistics, feature_path: types.FeaturePath ) -> statistics_pb2.FeatureNameStatistics: \"\"\"Get feature statistics", "internal mappings.\"\"\" # Lazily initialize in case we don't need an index. if", "not found in the feature statistics. 
\"\"\" if not isinstance(feature_stats, statistics_pb2.FeatureNameStatistics): raise TypeError('feature_stats", "self, x_path: Union[str, types.FeaturePath, Iterable[str]], y_path: Union[str, types.FeaturePath, Iterable[str]] ) -> Optional['CrossFeatureView']: \"\"\"Retrieve", "list of file paths of files containing sharded DatasetFeatureStatisticsList protos. io_provider: Optional StatisticsIOProvider.", "= None, input_paths: Optional[Iterable[str]] = None, io_provider: Optional[statistics_io_impl.StatisticsIOProvider] = None ) -> DatasetListView:", "self, feature_id: Union[str, types.FeaturePath, Iterable[str]] ) -> Optional['FeatureView']: \"\"\"Retrieve a feature if it", "to a length 1 path, and can be referred to as such. Args:", "is not of the expected type. \"\"\" if not isinstance(stats, statistics_pb2.DatasetFeatureStatisticsList): raise TypeError(", "try: decoded_value = value.decode('utf-8') except UnicodeError: return None return decoded_value def get_feature_type( dtype:", "deterministic ordering feature_paths = sorted(stats_values.keys()) for feature_path in feature_paths: feature_stats_proto = _make_feature_stats_proto(stats_values[feature_path], feature_path)", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "to as such. Args: feature_id: A types.FeaturePath, Iterable[str] consisting of path steps, or", "DatasetView(object): \"\"\"View of statistics for a dataset (slice).\"\"\" def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatistics): self._feature_map", "== 'name': feature_id = types.FeaturePath([feature.name]) else: feature_id = types.FeaturePath.from_proto(feature.path) if feature_id in self._feature_map:", "if the value cannot be decoded. \"\"\" try: decoded_value = value.decode('utf-8') except UnicodeError:", "feature_path: path of the feature. arrow_type: Arrow DataType. 
Returns: A statistics_pb2.FeatureNameStatistics.Type value or", "feature_paths: feature_stats_proto = _make_feature_stats_proto(stats_values[feature_path], feature_path) new_feature_stats_proto = result.features.add() new_feature_stats_proto.CopyFrom(feature_stats_proto) return result def _make_feature_stats_proto(", "Args: stats: A DatasetFeatureStatisticsList protocol buffer. slice_key: Slice key of the slice. Returns:", "pa import tensorflow as tf from tensorflow_data_validation import constants from tensorflow_data_validation import types", "Text, Tuple, Union import numpy as np import pyarrow as pa import tensorflow", "None class CrossFeatureView(object): \"\"\"View of a single cross feature.\"\"\" def __init__(self, stats_proto: statistics_pb2.CrossFeatureStatistics):", "value to decode. Returns: The value decoded as utf-8, or None, if the", "Optional['FeatureView']: \"\"\"Retrieve a feature if it exists. Features specified within the underlying proto", "j, cross_feature in enumerate(self._statistics.cross_features): feature_id = (types.FeaturePath.from_proto(cross_feature.path_x), types.FeaturePath.from_proto(cross_feature.path_y)) if feature_id in self._cross_feature_map: raise", "try: return load_stats_tfrecord(input_path) except Exception: # pylint: disable=broad-except logging.info('File %s did not look", "if not isinstance(feature_stats, statistics_pb2.FeatureNameStatistics): raise TypeError('feature_stats is of type %s, should be a", "to the output of the provided io_provider. input_paths: A list of file paths", "required by applicable law or agreed to in writing, software # distributed under", "A types.FeaturePath, Iterable[str] consisting of path steps, or a str, which is converted", "0.5, 'Correlation': 0.1 } feature_path: The path of the feature. 
Returns: A FeatureNameStatistic", "j self._initialized = True def proto(self) -> statistics_pb2.DatasetFeatureStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return", "multiple datasets (slices).\"\"\" def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatisticsList): self._statistics = stats_proto self._slice_map = {}", "value of the custom statistic for the feature. Ex. { FeaturePath(('feature_1',)): { 'Mutual", "the value cannot be decoded. \"\"\" try: decoded_value = value.decode('utf-8') except UnicodeError: return", "= False def _init_index(self): \"\"\"Initializes internal mappings.\"\"\" # Lazily initialize in case we", "in feature_paths: feature_stats_proto = _make_feature_stats_proto(stats_values[feature_path], feature_path) new_feature_stats_proto = result.features.add() new_feature_stats_proto.CopyFrom(feature_stats_proto) return result def", "None: io_provider = statistics_io_impl.get_io_provider() if input_path_prefix is not None: input_paths = io_provider.glob(input_path_prefix) acc", "return result raise ValueError('Invalid slice key.') def load_statistics( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads", "\"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def custom_statistic(self, name: str) -> Optional[statistics_pb2.CustomStatistic]: \"\"\"Retrieve", "for name %s' % name) return result # TODO(b/202910677): Add convenience methods for", "the specific language governing permissions and # limitations under the License. \"\"\"Utilities for", "for each feature in the dataset. \"\"\" result = statistics_pb2.DatasetFeatureStatistics() # Sort alphabetically", "io_provider. input_paths: A list of file paths of files containing sharded DatasetFeatureStatisticsList protos.", "should be used in place of proto access (for example, x.numeric_statistics() instead of", "-> statistics_pb2.DatasetFeatureStatistics: \"\"\"Builds DatasetFeatureStatistics proto with custom stats from input dict. 
Args: stats_values:", "specific language governing permissions and # limitations under the License. \"\"\"Utilities for stats", "ValueError('Custom statistics %s not found in the feature statistics.' % custom_stats_name) def get_slice_stats(", "'file.', input_path) return load_stats_text(input_path) def _normalize_feature_id( name_or_path_or_steps: Union[str, types.FeaturePath, Iterable[str]] ) -> types.FeaturePath:", "multiple derived features match. \"\"\" # TODO(b/221453427): Consider indexing if performance becomes an", "if stat.name == name: if result is None: result = stat else: raise", "feature_id in self._cross_feature_map: raise ValueError('Duplicate feature %s' % feature_id) self._cross_feature_map[feature_id] = j self._initialized", "statistics_pb2.DatasetFeatureStatistics() # Sort alphabetically by feature name to have deterministic ordering feature_paths =", "range(len(source_paths)): if (source_paths[i] != types.FeaturePath.from_proto( feature.validation_derived_source.source_path[i])): all_match = False break if all_match: results.append(FeatureView(feature))", "a single cross feature.\"\"\" def __init__(self, stats_proto: statistics_pb2.CrossFeatureStatistics): self._statistics = stats_proto def proto(self)", "if feature_id is not present. \"\"\" feature_id = _normalize_feature_id(feature_id) self._init_index() index = self._feature_map.get(feature_id,", "string statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'string_stats': return self._statistics.string_stats return None def", "the specified DatasetFeatureStatisticsList proto stored in text format. Args: input_path: File path from", "a' ' Dataset.') if field_identifier == 'name': feature_id = types.FeaturePath([feature.name]) else: feature_id =", "def load_stats_tfrecord( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data statistics proto from TFRecord file.", "or a plain file containing the statistics proto in Proto Text Format. 
Returns:", "% type(feature_stats).__name__) for custom_stats in feature_stats.custom_stats: if custom_stats.name == custom_stats_name: return getattr(custom_stats, custom_stats.WhichOneof('val'))", "in case we don't need an index. if self._initialized: return for dataset in", "-> Iterable[Tuple[types.FeaturePath, types.FeaturePath]]: \"\"\"Lists cross-feature identifiers.\"\"\" self._init_index() return self._cross_feature_map.keys() def get_derived_feature( self, deriver_name:", "stats.features: if feature_path == types.FeaturePath.from_proto(feature_stats.path): return feature_stats raise ValueError('Feature %s not found in", "an issue. results = [] for feature in self.proto().features: if feature.validation_derived_source is None:", "LINT.ThenChange(../anomalies/custom_domain_util.cc) def maybe_get_utf8(value: bytes) -> Optional[Text]: \"\"\"Returns the value decoded as utf-8, or", "= _normalize_feature_id(x_path) y_path = _normalize_feature_id(y_path) self._init_index() feature_id = (x_path, y_path) index = self._cross_feature_map.get(feature_id,", "input feature statistics is not of the expected type. ValueError: If the custom", "list_cross_features( self) -> Iterable[Tuple[types.FeaturePath, types.FeaturePath]]: \"\"\"Lists cross-feature identifiers.\"\"\" self._init_index() return self._cross_feature_map.keys() def get_derived_feature(", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "'num_stats': return self._statistics.num_stats.common_stats if which == 'string_stats': return self._statistics.string_stats.common_stats if which == 'bytes_stats':", "a feature if it exists. 
Features specified within the underlying proto by name", "Union import numpy as np import pyarrow as pa import tensorflow as tf", "self.proto().features: if feature.validation_derived_source is None: continue if feature.validation_derived_source.deriver_name != deriver_name: continue if (len(source_paths)", "= _make_feature_stats_proto(stats_values[feature_path], feature_path) new_feature_stats_proto = result.features.add() new_feature_stats_proto.CopyFrom(feature_stats_proto) return result def _make_feature_stats_proto( stats_values: Dict[Text,", "where the key of the dict is the name of the custom statistic", "FeatureView, or None if feature_id is not present. \"\"\" feature_id = _normalize_feature_id(feature_id) self._init_index()", "a # CustomStatistic with name=DOMAIN_INFO. DOMAIN_INFO = 'domain_info' # LINT.ThenChange(../anomalies/custom_domain_util.cc) def maybe_get_utf8(value: bytes)", "proto in Proto Text Format. Returns: A DatasetFeatureStatisticsList proto. Raises: IOError: If the", "== custom_stats_name: return getattr(custom_stats, custom_stats.WhichOneof('val')) raise ValueError('Custom statistics %s not found in the", "= (x_path, y_path) index = self._cross_feature_map.get(feature_id, None) if index is None: return None", "\"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def get_slice(self, slice_key: str) -> Optional['DatasetView']: self._init_index()", "name %s' % name) return result # TODO(b/202910677): Add convenience methods for retrieving", "input_path) return load_stats_text(input_path) def _normalize_feature_id( name_or_path_or_steps: Union[str, types.FeaturePath, Iterable[str]] ) -> types.FeaturePath: if", "inference using a # CustomStatistic with name=DOMAIN_INFO. DOMAIN_INFO = 'domain_info' # LINT.ThenChange(../anomalies/custom_domain_util.cc) def", "custom statistics from the feature statistics. Args: feature_stats: A FeatureNameStatistics protocol buffer. 
custom_stats_name:", "'stats is of type %s, should be a ' 'DatasetFeatureStatisticsList proto.' % type(stats).__name__)", "-> types.FeaturePath: if isinstance(name_or_path_or_steps, str): return types.FeaturePath([name_or_path_or_steps]) if isinstance(name_or_path_or_steps, types.FeaturePath): return name_or_path_or_steps return", "indices. Noop if already initialized.\"\"\" if self._initialized: return field_identifier = None for j,", "of path) are normalized to a length 1 path, and can be referred", "string_statistics(self) -> Optional[statistics_pb2.StringStatistics]: \"\"\"Retrieve string statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'string_stats': return", "Ex. { FeaturePath(('feature_1',)): { 'Mutual Information': 0.5, 'Correlation': 0.1 }, FeaturePath(('feature_2',)): { 'Mutual", "the custom statistic of that feature. Ex. { 'Mutual Information': 0.5, 'Correlation': 0.1", "y_path) index = self._cross_feature_map.get(feature_id, None) if index is None: return None return CrossFeatureView(self._statistics.cross_features[index])", "\"\"\"Writes a DatasetFeatureStatisticsList proto to a file in text format. Args: stats: A", ") -> Optional['CrossFeatureView']: \"\"\"Retrieve a cross-feature if it exists, or None.\"\"\" x_path =", "statistics_pb2.FeatureNameStatistics.STRING, } # LINT.IfChange # Semantic domain information can be passed to schema", "stats_list in stats_iter: for dataset in stats_list.datasets: acc.MergeDatasetFeatureStatistics(dataset.SerializeToString()) stats = statistics_pb2.DatasetFeatureStatisticsList() stats.ParseFromString(acc.Get()) return", "except Exception: # pylint: disable=broad-except logging.info('File %s did not look like a TFRecord.", "for feature_stats in stats.features: if feature_path == types.FeaturePath.from_proto(feature_stats.path): return feature_stats raise ValueError('Feature %s", "feature. 
This class provides accessor methods, as well as access to the underlying", "statistics_pb2.FeatureNameStatistics): self._statistics = stats_proto def proto(self) -> statistics_pb2.FeatureNameStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return", "underlying proto by name (instead of path) are normalized to a length 1", "ValueError('Duplicate custom_stats for name %s' % name) return result # TODO(b/202910677): Add convenience", "in the feature statistics.' % custom_stats_name) def get_slice_stats( stats: statistics_pb2.DatasetFeatureStatisticsList, slice_key: Text) ->", "statistics_pb2.DatasetFeatureStatisticsList() result.datasets.add().CopyFrom(slice_stats) return result raise ValueError('Invalid slice key.') def load_statistics( input_path: Text) ->", "custom_statistic(self, name: str) -> Optional[statistics_pb2.CustomStatistic]: \"\"\"Retrieve a custom_statistic by name.\"\"\" result = None", "if arrow_type is null (which means it cannot be determined for now). Raises:", "in place of proto access (for example, x.numeric_statistics() instead of x.proto().num_stats) in order", "-> statistics_pb2.FeatureNameStatistics: \"\"\"Get feature statistics from the dataset statistics. Args: stats: A DatasetFeatureStatistics", "get_feature_type( dtype: np.dtype) -> Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature type from numpy dtype. 
Args: dtype:", "None def struct_statistics(self) -> Optional[statistics_pb2.StructStatistics]: \"\"\"Retrieve struct statistics if available.\"\"\" if self._statistics.WhichOneof('stats') ==", "result raise ValueError('Invalid slice key.') def load_statistics( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data", "Information': 0.8, 'Correlation': 0.6 } } Returns: DatasetFeatureStatistics proto containing the custom statistics", "pa.types.is_integer(value_type): return statistics_pb2.FeatureNameStatistics.INT elif pa.types.is_floating(value_type): return statistics_pb2.FeatureNameStatistics.FLOAT elif arrow_util.is_binary_like(value_type): return statistics_pb2.FeatureNameStatistics.STRING elif pa.types.is_struct(value_type):", "self._statistics.datasets: if dataset.name in self._slice_map: raise ValueError('Duplicate slice name %s' % dataset.name) self._slice_map[dataset.name]", "stat_names: result.custom_stats.add(name=stat_name, num=stats_values[stat_name]) return result def write_stats_text(stats: statistics_pb2.DatasetFeatureStatisticsList, output_path: Text) -> None: \"\"\"Writes", "self._slice_map.get(slice_key, None) def get_default_slice(self) -> Optional['DatasetView']: self._init_index() if len(self._slice_map) == 1: for _,", "Returns: A FeatureNameStatistics protocol buffer. Raises: TypeError: If the input statistics is not", "} Returns: DatasetFeatureStatistics proto containing the custom statistics for each feature in the", "in order to support future extension of the proto. \"\"\" def __init__(self, stats_proto:", "proto containing the custom statistics for a feature. 
\"\"\" result = statistics_pb2.FeatureNameStatistics() result.path.CopyFrom(feature_path.to_proto())", "dataset.name) self._slice_map[dataset.name] = DatasetView(dataset) self._initialized = True def proto(self) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Retrieve the", "value_type = arrow_util.get_innermost_nested_type(arrow_type) if pa.types.is_integer(value_type): return statistics_pb2.FeatureNameStatistics.INT elif pa.types.is_floating(value_type): return statistics_pb2.FeatureNameStatistics.FLOAT elif arrow_util.is_binary_like(value_type):", "struct statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'struct_stats': return self._statistics.struct_stats return None def", "where the keys are feature paths, and values are Dicts with keys denoting", "= stats_proto self._initialized = False def _init_index(self): \"\"\"Initializes internal indices. Noop if already", "# you may not use this file except in compliance with the License.", "a single record.') except StopIteration: return result except Exception as e: raise e", "IOError('Invalid input path {}.'.format(input_path)) try: return load_stats_tfrecord(input_path) except Exception: # pylint: disable=broad-except logging.info('File", "for feature in self.proto().features: if feature.validation_derived_source is None: continue if feature.validation_derived_source.deriver_name != deriver_name:", "all_match = False break if all_match: results.append(FeatureView(feature)) if len(results) > 1: raise ValueError('Ambiguous", "input_path_prefix: If passed, loads files starting with this prefix and ending with a", "return statistics_pb2.FeatureNameStatistics.INT elif pa.types.is_floating(value_type): return statistics_pb2.FeatureNameStatistics.FLOAT elif arrow_util.is_binary_like(value_type): return statistics_pb2.FeatureNameStatistics.STRING elif pa.types.is_struct(value_type): return", "result except Exception as e: raise e def get_feature_stats(stats: 
statistics_pb2.DatasetFeatureStatistics, feature_path: types.FeaturePath )", "def get_default_slice(self) -> Optional['DatasetView']: self._init_index() if len(self._slice_map) == 1: for _, v in", "self._initialized: return field_identifier = None for j, feature in enumerate(self._statistics.features): if field_identifier is", "def load_sharded_statistics( input_path_prefix: Optional[str] = None, input_paths: Optional[Iterable[str]] = None, io_provider: Optional[statistics_io_impl.StatisticsIOProvider] =", "Iterable[str]], y_path: Union[str, types.FeaturePath, Iterable[str]] ) -> Optional['CrossFeatureView']: \"\"\"Retrieve a cross-feature if it", "or name within a' ' Dataset.') if field_identifier == 'name': feature_id = types.FeaturePath([feature.name])", "This class provides accessor methods, as well as access to the underlying proto.", "proto. \"\"\" stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_proto.ParseFromString(io_util.read_file_to_string( input_path, binary_mode=True)) return stats_proto def load_stats_tfrecord( input_path:", "statistics %s not found in the feature statistics.' % custom_stats_name) def get_slice_stats( stats:", "statistics_pb2.DatasetFeatureStatistics): raise TypeError('statistics is of type %s, should be a ' 'DatasetFeatureStatistics proto.'", "to load the DatasetFeatureStatisticsList proto. Returns: A DatasetFeatureStatisticsList proto. \"\"\" stats_proto = statistics_pb2.DatasetFeatureStatisticsList()", "protocol buffer. Raises: TypeError: If the input statistics is not of the expected", "statistic. Raises: TypeError: If the input feature statistics is not of the expected", "The file should be a one-record TFRecord file or a plain file containing", "custom statistic and the value is the numeric value of the custom statistic", "have the specified slice statistics. 
\"\"\" for slice_stats in stats.datasets: if slice_stats.name ==", "tensorflow_data_validation import constants from tensorflow_data_validation import types from tensorflow_data_validation.arrow import arrow_util from tensorflow_data_validation.utils", "the value of the custom statistic for the feature. Ex. { FeaturePath(('feature_1',)): {", "in the dataset. \"\"\" result = statistics_pb2.DatasetFeatureStatistics() # Sort alphabetically by feature name", "type. ValueError: If the input feature is not found in the dataset statistics.", "if available.\"\"\" if self._statistics.WhichOneof('stats') == 'struct_stats': return self._statistics.struct_stats return None def common_statistics(self) ->", "else: feature_id = types.FeaturePath.from_proto(feature.path) if feature_id in self._feature_map: raise ValueError('Duplicate feature %s' %", "!= deriver_name: continue if (len(source_paths) != len( feature.validation_derived_source.source_path)): continue all_match = True for", "source_paths: Source paths for derived features. Matches validation_derived_source.source_path. Returns: FeatureView of derived feature.", "format. Args: input_path: File path from which to load the DatasetFeatureStatisticsList proto. Returns:", "and its inputs. Args: deriver_name: The name of a deriver. 
Matches validation_derived_source deriver_name.", "__future__ import division from __future__ import print_function import logging from typing import Dict,", "if self._statistics.WhichOneof('stats') == 'num_stats': return self._statistics.num_stats return None def string_statistics(self) -> Optional[statistics_pb2.StringStatistics]: \"\"\"Retrieve", "name (instead of path) are normalized to a length 1 path, and can", "License for the specific language governing permissions and # limitations under the License.", "return None return CrossFeatureView(self._statistics.cross_features[index]) def list_features(self) -> Iterable[types.FeaturePath]: \"\"\"Lists feature identifiers.\"\"\" self._init_index() return", "statistic is not found in the feature statistics. \"\"\" if not isinstance(feature_stats, statistics_pb2.FeatureNameStatistics):", "provide one of input_paths_prefix, input_paths.') if io_provider is None: io_provider = statistics_io_impl.get_io_provider() if", "= True def proto(self) -> statistics_pb2.DatasetFeatureStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def", "not isinstance(stats, statistics_pb2.DatasetFeatureStatistics): raise TypeError('statistics is of type %s, should be a '", "statistics_pb2.CrossFeatureStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def load_sharded_statistics( input_path_prefix: Optional[str] = None,", "\"License\"); # you may not use this file except in compliance with the", "with either path or name within a' ' Dataset.') if field_identifier == 'name':", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "feature if it exists. 
Features specified within the underlying proto by name (instead", "statistics_pb2.FeatureNameStatistics: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def custom_statistic(self, name: str) -> Optional[statistics_pb2.CustomStatistic]:", "if result is None: result = stat else: raise ValueError('Duplicate custom_stats for name", "= statistics.DatasetListAccumulator() stats_iter = io_provider.record_iterator_impl(input_paths) for stats_list in stats_iter: for dataset in stats_list.datasets:", "as pa import tensorflow as tf from tensorflow_data_validation import constants from tensorflow_data_validation import", "%s, should be a ' 'DatasetFeatureStatistics proto.' % type(stats).__name__) for feature_stats in stats.features:", "derived feature based on a deriver name and its inputs. Args: deriver_name: The", "License. \"\"\"Utilities for stats generators.\"\"\" from __future__ import absolute_import from __future__ import division", "return None def string_statistics(self) -> Optional[statistics_pb2.StringStatistics]: \"\"\"Retrieve string statistics if available.\"\"\" if self._statistics.WhichOneof('stats')", "get_feature_type_from_arrow_type( feature_path: types.FeaturePath, arrow_type: pa.DataType) -> Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature type from Arrow type.", "num=stats_values[stat_name]) return result def write_stats_text(stats: statistics_pb2.DatasetFeatureStatisticsList, output_path: Text) -> None: \"\"\"Writes a DatasetFeatureStatisticsList", "not have the specified slice statistics. \"\"\" for slice_stats in stats.datasets: if slice_stats.name", "list_features(self) -> Iterable[types.FeaturePath]: \"\"\"Lists feature identifiers.\"\"\" self._init_index() return self._feature_map.keys() def list_cross_features( self) ->", "instead of x.proto().num_stats) in order to support future extension of the proto. 
\"\"\"", "= _normalize_feature_id(feature_id) self._init_index() index = self._feature_map.get(feature_id, None) if index is None: return None", "Args: value: The bytes value to decode. Returns: The value decoded as utf-8,", "but feature {} ' 'was {}.'.format(feature_path, arrow_type)) value_type = arrow_util.get_innermost_nested_type(arrow_type) if pa.types.is_integer(value_type): return", "which == 'string_stats': return self._statistics.string_stats.common_stats if which == 'bytes_stats': return self._statistics.bytes_stats.common_stats if which", "feature. \"\"\" result = statistics_pb2.FeatureNameStatistics() result.path.CopyFrom(feature_path.to_proto()) # Sort alphabetically by statistic name to", "None: raise ValueError('Must provide one of input_paths_prefix, input_paths.') if io_provider is None: io_provider", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "Raises: TypeError: If the input feature statistics is not of the expected type.", "not look like a TFRecord. Try reading as a plain ' 'file.', input_path)", "in self._statistics.custom_stats: if stat.name == name: if result is None: result = stat", "value: The bytes value to decode. Returns: The value decoded as utf-8, or", "_normalize_feature_id(x_path) y_path = _normalize_feature_id(y_path) self._init_index() feature_id = (x_path, y_path) index = self._cross_feature_map.get(feature_id, None)", "get_slice_stats( stats: statistics_pb2.DatasetFeatureStatisticsList, slice_key: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Get statistics associated with a specific", "the specified slice statistics. \"\"\" for slice_stats in stats.datasets: if slice_stats.name == slice_key:", "slice. 
Raises: ValueError: If the input statistics proto does not have the specified", "Returns: A statistics_pb2.FeatureNameStatistics.Type value or None if arrow_type is null (which means it", "None def common_statistics(self) -> Optional[statistics_pb2.CommonStatistics]: \"\"\"Retrieve common statistics if available.\"\"\" which = self._statistics.WhichOneof('stats')", "= None ) -> DatasetListView: \"\"\"Read a sharded DatasetFeatureStatisticsList from disk as a", "= { 'f': statistics_pb2.FeatureNameStatistics.FLOAT, 'i': statistics_pb2.FeatureNameStatistics.INT, 'u': statistics_pb2.FeatureNameStatistics.INT, 'S': statistics_pb2.FeatureNameStatistics.STRING, 'O': statistics_pb2.FeatureNameStatistics.STRING, 'U':", "stats_values: Dict[types.FeaturePath, Dict[Text, float]] ) -> statistics_pb2.DatasetFeatureStatistics: \"\"\"Builds DatasetFeatureStatistics proto with custom stats", "if feature.validation_derived_source is None: continue if feature.validation_derived_source.deriver_name != deriver_name: continue if (len(source_paths) !=", "' 'file.', input_path) return load_stats_text(input_path) def _normalize_feature_id( name_or_path_or_steps: Union[str, types.FeaturePath, Iterable[str]] ) ->", "TODO(b/202910677): Add convenience methods for retrieving first-party custom # statistics (e.g., MI, NLP).", "-> Optional['DatasetView']: self._init_index() if len(self._slice_map) == 1: for _, v in self._slice_map.items(): return", "limitations under the License. \"\"\"Utilities for stats generators.\"\"\" from __future__ import absolute_import from", "io_provider.glob(input_path_prefix) acc = statistics.DatasetListAccumulator() stats_iter = io_provider.record_iterator_impl(input_paths) for stats_list in stats_iter: for dataset", "Union[str, types.FeaturePath, Iterable[str]] ) -> Optional['FeatureView']: \"\"\"Retrieve a feature if it exists. 
Features", "(for example, x.numeric_statistics() instead of x.proto().num_stats) in order to support future extension of", "\"\"\" if pa.types.is_null(arrow_type): return None if not arrow_util.is_list_like(arrow_type): raise TypeError('Expected feature column to", "x.proto().num_stats) in order to support future extension of the proto. \"\"\" def __init__(self,", "A FeatureView, or None if feature_id is not present. \"\"\" feature_id = _normalize_feature_id(feature_id)", "the DatasetFeatureStatisticsList proto. Returns: A DatasetFeatureStatisticsList proto. \"\"\" stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_proto.ParseFromString(io_util.read_file_to_string( input_path,", "new_feature_stats_proto.CopyFrom(feature_stats_proto) return result def _make_feature_stats_proto( stats_values: Dict[Text, float], feature_path: types.FeaturePath) -> statistics_pb2.FeatureNameStatistics: \"\"\"Creates", "if it exists, or None.\"\"\" x_path = _normalize_feature_id(x_path) y_path = _normalize_feature_id(y_path) self._init_index() feature_id", "feature_path: The path of the feature whose statistics to obtain from the dataset", "len(results) > 1: raise ValueError('Ambiguous result, %d features matched' % len(results)) if len(results)", "\"\"\"Lists feature identifiers.\"\"\" self._init_index() return self._feature_map.keys() def list_cross_features( self) -> Iterable[Tuple[types.FeaturePath, types.FeaturePath]]: \"\"\"Lists", "'f': statistics_pb2.FeatureNameStatistics.FLOAT, 'i': statistics_pb2.FeatureNameStatistics.INT, 'u': statistics_pb2.FeatureNameStatistics.INT, 'S': statistics_pb2.FeatureNameStatistics.STRING, 'O': statistics_pb2.FeatureNameStatistics.STRING, 'U': statistics_pb2.FeatureNameStatistics.STRING, }", "-> Optional[statistics_pb2.CommonStatistics]: \"\"\"Retrieve common statistics if available.\"\"\" which = self._statistics.WhichOneof('stats') if which ==", "self._statistics.WhichOneof('stats') if which == 'num_stats': return 
self._statistics.num_stats.common_stats if which == 'string_stats': return self._statistics.string_stats.common_stats", "feature_id) self._cross_feature_map[feature_id] = j self._initialized = True def proto(self) -> statistics_pb2.DatasetFeatureStatistics: \"\"\"Retrieve the", "name to have deterministic ordering stat_names = sorted(stats_values.keys()) for stat_name in stat_names: result.custom_stats.add(name=stat_name,", "pyarrow as pa import tensorflow as tf from tensorflow_data_validation import constants from tensorflow_data_validation", "if dataset.name in self._slice_map: raise ValueError('Duplicate slice name %s' % dataset.name) self._slice_map[dataset.name] =", "continue if (len(source_paths) != len( feature.validation_derived_source.source_path)): continue all_match = True for i in", "raise TypeError('Feature {} has unsupported arrow type: {}'.format( feature_path, arrow_type)) def make_dataset_feature_stats_proto( stats_values:", "result = statistics_pb2.FeatureNameStatistics() result.path.CopyFrom(feature_path.to_proto()) # Sort alphabetically by statistic name to have deterministic", "A statistics_pb2.FeatureNameStatistics.Type value. \"\"\" return _NP_DTYPE_KIND_TO_FEATURE_TYPE.get(dtype.kind) def get_feature_type_from_arrow_type( feature_path: types.FeaturePath, arrow_type: pa.DataType) ->", "\"\"\"Retrieve byte statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'bytes_stats': return self._statistics.bytes_stats return None", "with a pattern corresponding to the output of the provided io_provider. input_paths: A", "Optional StatisticsIOProvider. If unset, a default will be constructed. Returns: A DatasetListView containing", "return types.FeaturePath([name_or_path_or_steps]) if isinstance(name_or_path_or_steps, types.FeaturePath): return name_or_path_or_steps return types.FeaturePath(name_or_path_or_steps) class DatasetListView(object): \"\"\"View of", "text format. 
Args: input_path: File path from which to load the DatasetFeatureStatisticsList proto.", "make_dataset_feature_stats_proto( stats_values: Dict[types.FeaturePath, Dict[Text, float]] ) -> statistics_pb2.DatasetFeatureStatistics: \"\"\"Builds DatasetFeatureStatistics proto with custom", "of the slice. Returns: Statistics of the specific slice. Raises: ValueError: If the", "is the name of the custom statistic and the value is the numeric", "Optional['FeatureView']: \"\"\"Retrieve a derived feature based on a deriver name and its inputs.", "Arrow DataType. Returns: A statistics_pb2.FeatureNameStatistics.Type value or None if arrow_type is null (which", "\"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def get_feature( self, feature_id: Union[str, types.FeaturePath, Iterable[str]]", "CrossFeatureView(self._statistics.cross_features[index]) def list_features(self) -> Iterable[types.FeaturePath]: \"\"\"Lists feature identifiers.\"\"\" self._init_index() return self._feature_map.keys() def list_cross_features(", ") -> DatasetListView: \"\"\"Read a sharded DatasetFeatureStatisticsList from disk as a DatasetListView. Args:", "file in text format. Args: stats: A DatasetFeatureStatisticsList proto. output_path: File path to", "stats_iter = io_provider.record_iterator_impl(input_paths) for stats_list in stats_iter: for dataset in stats_list.datasets: acc.MergeDatasetFeatureStatistics(dataset.SerializeToString()) stats", "raise ValueError('Invalid slice key.') def load_statistics( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data statistics", "custom statistics for each feature in the dataset. \"\"\" result = statistics_pb2.DatasetFeatureStatistics() #", "retrieving first-party custom # statistics (e.g., MI, NLP). 
def numeric_statistics(self) -> Optional[statistics_pb2.NumericStatistics]: \"\"\"Retrieve", "2.0 (the \"License\"); # you may not use this file except in compliance", "to have deterministic ordering feature_paths = sorted(stats_values.keys()) for feature_path in feature_paths: feature_stats_proto =", "self._cross_feature_map.keys() def get_derived_feature( self, deriver_name: str, source_paths: Sequence[types.FeaturePath]) -> Optional['FeatureView']: \"\"\"Retrieve a derived", "len(results) == 1: return results.pop() return None class FeatureView(object): \"\"\"View of a single", "'DatasetFeatureStatistics proto.' % type(stats).__name__) for feature_stats in stats.features: if feature_path == types.FeaturePath.from_proto(feature_stats.path): return", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "Dict[str,float] where the key of the dict is the name of the custom", "from a file. Args: input_path: File path from which to load the DatasetFeatureStatisticsList", "# # Unless required by applicable law or agreed to in writing, software", "statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads a serialized DatasetFeatureStatisticsList proto from a file. Args: input_path: File path", "input feature is not found in the dataset statistics. \"\"\" if not isinstance(stats,", "express or implied. # See the License for the specific language governing permissions", "determined for now). Raises: TypeError: if the type is not supported. \"\"\" if", "isinstance(feature_stats, statistics_pb2.FeatureNameStatistics): raise TypeError('feature_stats is of type %s, should be a ' 'FeatureNameStatistics", "as well as access to the underlying proto. Where possible, accessors should be", "A Dict[FeaturePath, Dict[str,float]] where the keys are feature paths, and values are Dicts", "dtype. Returns: A statistics_pb2.FeatureNameStatistics.Type value. 
\"\"\" return _NP_DTYPE_KIND_TO_FEATURE_TYPE.get(dtype.kind) def get_feature_type_from_arrow_type( feature_path: types.FeaturePath, arrow_type:", "if not tf.io.gfile.exists(input_path): raise IOError('Invalid input path {}.'.format(input_path)) try: return load_stats_tfrecord(input_path) except Exception:", "if multiple derived features match. \"\"\" # TODO(b/221453427): Consider indexing if performance becomes", "in the dataset statistics.' % feature_path) def get_custom_stats( feature_stats: statistics_pb2.FeatureNameStatistics, custom_stats_name: Text )", "either express or implied. # See the License for the specific language governing", "__future__ import print_function import logging from typing import Dict, Iterable, Optional, Sequence, Text,", "statistics proto. Returns: The custom statistic. Raises: TypeError: If the input feature statistics", "file. Args: input_path: Data statistics file path. The file should be a one-record", "= _normalize_feature_id(y_path) self._init_index() feature_id = (x_path, y_path) index = self._cross_feature_map.get(feature_id, None) if index", "passed to schema inference using a # CustomStatistic with name=DOMAIN_INFO. DOMAIN_INFO = 'domain_info'", "buffer. custom_stats_name: The name of the custom statistics to obtain from the feature", "== slice_key: result = statistics_pb2.DatasetFeatureStatisticsList() result.datasets.add().CopyFrom(slice_stats) return result raise ValueError('Invalid slice key.') def", "self._cross_feature_map: raise ValueError('Duplicate feature %s' % feature_id) self._cross_feature_map[feature_id] = j self._initialized = True", "containing the custom statistics for a feature. 
\"\"\" result = statistics_pb2.FeatureNameStatistics() result.path.CopyFrom(feature_path.to_proto()) #", "get_cross_feature( self, x_path: Union[str, types.FeaturePath, Iterable[str]], y_path: Union[str, types.FeaturePath, Iterable[str]] ) -> Optional['CrossFeatureView']:", "\"\"\"Retrieve common statistics if available.\"\"\" which = self._statistics.WhichOneof('stats') if which == 'num_stats': return", "of input_paths_prefix, input_paths.') if io_provider is None: io_provider = statistics_io_impl.get_io_provider() if input_path_prefix is", "feature.validation_derived_source.source_path[i])): all_match = False break if all_match: results.append(FeatureView(feature)) if len(results) > 1: raise", "for feature_path in feature_paths: feature_stats_proto = _make_feature_stats_proto(stats_values[feature_path], feature_path) new_feature_stats_proto = result.features.add() new_feature_stats_proto.CopyFrom(feature_stats_proto) return", "None) if index is None: return None return FeatureView(self._statistics.features[index]) def get_cross_feature( self, x_path:", "def get_feature( self, feature_id: Union[str, types.FeaturePath, Iterable[str]] ) -> Optional['FeatureView']: \"\"\"Retrieve a feature", "'Mutual Information': 0.5, 'Correlation': 0.1 }, FeaturePath(('feature_2',)): { 'Mutual Information': 0.8, 'Correlation': 0.6", "None: result = stat else: raise ValueError('Duplicate custom_stats for name %s' % name)", "dataset statistics.' % feature_path) def get_custom_stats( feature_stats: statistics_pb2.FeatureNameStatistics, custom_stats_name: Text ) -> Union[float,", "the value is the numeric value of the custom statistic of that feature.", "FeaturePath(('feature_2',)): { 'Mutual Information': 0.8, 'Correlation': 0.6 } } Returns: DatasetFeatureStatistics proto containing", "statistics from the feature statistics. Args: feature_stats: A FeatureNameStatistics protocol buffer. 
custom_stats_name: The", "Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads the specified DatasetFeatureStatisticsList proto stored in text format. Args:", "the License. # You may obtain a copy of the License at #", "return result # TODO(b/202910677): Add convenience methods for retrieving first-party custom # statistics", "deriver_name. source_paths: Source paths for derived features. Matches validation_derived_source.source_path. Returns: FeatureView of derived", "feature_path: types.FeaturePath) -> statistics_pb2.FeatureNameStatistics: \"\"\"Creates the FeatureNameStatistics proto for one feature. Args: stats_values:", "Semantic domain information can be passed to schema inference using a # CustomStatistic", "file or a plain file containing the statistics proto in Proto Text Format.", "result.datasets.add().CopyFrom(slice_stats) return result raise ValueError('Invalid slice key.') def load_statistics( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList:", "common_statistics(self) -> Optional[statistics_pb2.CommonStatistics]: \"\"\"Retrieve common statistics if available.\"\"\" which = self._statistics.WhichOneof('stats') if which", "FeatureNameStatistics proto for one feature. Args: stats_values: A Dict[str,float] where the key of", "Returns: A statistics_pb2.FeatureNameStatistics.Type value. \"\"\" return _NP_DTYPE_KIND_TO_FEATURE_TYPE.get(dtype.kind) def get_feature_type_from_arrow_type( feature_path: types.FeaturePath, arrow_type: pa.DataType)", "custom_stats in feature_stats.custom_stats: if custom_stats.name == custom_stats_name: return getattr(custom_stats, custom_stats.WhichOneof('val')) raise ValueError('Custom statistics", "float], feature_path: types.FeaturePath) -> statistics_pb2.FeatureNameStatistics: \"\"\"Creates the FeatureNameStatistics proto for one feature. 
Args:", "bytes_statistics(self) -> Optional[statistics_pb2.BytesStatistics]: \"\"\"Retrieve byte statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'bytes_stats': return", "raise ValueError('Duplicate feature %s' % feature_id) self._cross_feature_map[feature_id] = j self._initialized = True def", "as tf from tensorflow_data_validation import constants from tensorflow_data_validation import types from tensorflow_data_validation.arrow import", "None if arrow_type is null (which means it cannot be determined for now).", "If the input path does not exist. \"\"\" if not tf.io.gfile.exists(input_path): raise IOError('Invalid", "need an index. if self._initialized: return for dataset in self._statistics.datasets: if dataset.name in", "None: return None return CrossFeatureView(self._statistics.cross_features[index]) def list_features(self) -> Iterable[types.FeaturePath]: \"\"\"Lists feature identifiers.\"\"\" self._init_index()", "self._statistics.struct_stats.common_stats return None class CrossFeatureView(object): \"\"\"View of a single cross feature.\"\"\" def __init__(self,", "% name) return result # TODO(b/202910677): Add convenience methods for retrieving first-party custom", "index is None: return None return CrossFeatureView(self._statistics.cross_features[index]) def list_features(self) -> Iterable[types.FeaturePath]: \"\"\"Lists feature", "statistics for a feature. \"\"\" result = statistics_pb2.FeatureNameStatistics() result.path.CopyFrom(feature_path.to_proto()) # Sort alphabetically by", "a custom_statistic by name.\"\"\" result = None for stat in self._statistics.custom_stats: if stat.name", "of the expected type. ValueError: If the input feature is not found in", "underlying proto.\"\"\" return self._statistics def load_sharded_statistics( input_path_prefix: Optional[str] = None, input_paths: Optional[Iterable[str]] =", "raise ValueError('Feature %s not found in the dataset statistics.' 
% feature_path) def get_custom_stats(", "DatasetFeatureStatisticsList proto. Raises: IOError: If the input path does not exist. \"\"\" if", "# TODO(b/202910677): Add convenience methods for retrieving first-party custom # statistics (e.g., MI,", "be passed to schema inference using a # CustomStatistic with name=DOMAIN_INFO. DOMAIN_INFO =", "= self._cross_feature_map.get(feature_id, None) if index is None: return None return CrossFeatureView(self._statistics.cross_features[index]) def list_features(self)", "a dataset (slice).\"\"\" def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatistics): self._feature_map = {} # type: Dict[types.FeaturePath,", "A DatasetFeatureStatistics protocol buffer. feature_path: The path of the feature whose statistics to", "Format. Returns: A DatasetFeatureStatisticsList proto. Raises: IOError: If the input path does not", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "statistics to obtain from the feature statistics proto. Returns: The custom statistic. Raises:", "from disk as a DatasetListView. Args: input_path_prefix: If passed, loads files starting with", "Returns: The value decoded as utf-8, or None, if the value cannot be", "identifiers.\"\"\" self._init_index() return self._cross_feature_map.keys() def get_derived_feature( self, deriver_name: str, source_paths: Sequence[types.FeaturePath]) -> Optional['FeatureView']:", "for multiple datasets (slices).\"\"\" def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatisticsList): self._statistics = stats_proto self._slice_map =", "Raises: TypeError: If the input statistics is not of the expected type. 
ValueError:", "\"\"\"Utilities for stats generators.\"\"\" from __future__ import absolute_import from __future__ import division from", "tensorflow_data_validation import types from tensorflow_data_validation.arrow import arrow_util from tensorflow_data_validation.utils import statistics_io_impl from tensorflow_data_validation.utils", "numeric value of the custom statistic of that feature. Ex. { 'Mutual Information':", "not found in the dataset statistics. \"\"\" if not isinstance(stats, statistics_pb2.DatasetFeatureStatistics): raise TypeError('statistics", "stats: A DatasetFeatureStatisticsList proto. output_path: File path to write the DatasetFeatureStatisticsList proto. Raises:", "feature. Ex. { FeaturePath(('feature_1',)): { 'Mutual Information': 0.5, 'Correlation': 0.1 }, FeaturePath(('feature_2',)): {", "of the feature. Returns: A FeatureNameStatistic proto containing the custom statistics for a", "statistic of that feature. Ex. { 'Mutual Information': 0.5, 'Correlation': 0.1 } feature_path:", "import absolute_import from __future__ import division from __future__ import print_function import logging from", "dtype: np.dtype) -> Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature type from numpy dtype. Args: dtype: Numpy", "place of proto access (for example, x.numeric_statistics() instead of x.proto().num_stats) in order to", "get_feature( self, feature_id: Union[str, types.FeaturePath, Iterable[str]] ) -> Optional['FeatureView']: \"\"\"Retrieve a feature if", "from the feature statistics proto. Returns: The custom statistic. Raises: TypeError: If the", "corresponding to the output of the provided io_provider. input_paths: A list of file", "a file in text format. Args: stats: A DatasetFeatureStatisticsList proto. output_path: File path", "' 'FeatureNameStatistics proto.' 
% type(feature_stats).__name__) for custom_stats in feature_stats.custom_stats: if custom_stats.name == custom_stats_name:", "result.custom_stats.add(name=stat_name, num=stats_values[stat_name]) return result def write_stats_text(stats: statistics_pb2.DatasetFeatureStatisticsList, output_path: Text) -> None: \"\"\"Writes a", "== name: if result is None: result = stat else: raise ValueError('Duplicate custom_stats", "get_feature_stats(stats: statistics_pb2.DatasetFeatureStatistics, feature_path: types.FeaturePath ) -> statistics_pb2.FeatureNameStatistics: \"\"\"Get feature statistics from the dataset", "_normalize_feature_id( name_or_path_or_steps: Union[str, types.FeaturePath, Iterable[str]] ) -> types.FeaturePath: if isinstance(name_or_path_or_steps, str): return types.FeaturePath([name_or_path_or_steps])", "-> Optional[statistics_pb2.StringStatistics]: \"\"\"Retrieve string statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'string_stats': return self._statistics.string_stats", "path of the feature whose statistics to obtain from the dataset statistics. Returns:", "expected type. 
\"\"\" if not isinstance(stats, statistics_pb2.DatasetFeatureStatisticsList): raise TypeError( 'stats is of type", "feature_id = types.FeaturePath.from_proto(feature.path) if feature_id in self._feature_map: raise ValueError('Duplicate feature %s' % feature_id)", "in feature_stats.custom_stats: if custom_stats.name == custom_stats_name: return getattr(custom_stats, custom_stats.WhichOneof('val')) raise ValueError('Custom statistics %s", "return stats_proto def load_stats_binary( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads a serialized DatasetFeatureStatisticsList proto", "statistics for multiple datasets (slices).\"\"\" def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatisticsList): self._statistics = stats_proto self._slice_map", "<filename>tensorflow_data_validation/utils/stats_util.py # Copyright 2018 Google LLC # # Licensed under the Apache License,", "Statistics of the specific slice. Raises: ValueError: If the input statistics proto does", "one feature. Args: stats_values: A Dict[str,float] where the key of the dict is", "\"\"\" stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_proto.ParseFromString(io_util.read_file_to_string( input_path, binary_mode=True)) return stats_proto def load_stats_tfrecord( input_path: Text)", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "feature based on a deriver name and its inputs. Args: deriver_name: The name", "None raise TypeError('Feature {} has unsupported arrow type: {}'.format( feature_path, arrow_type)) def make_dataset_feature_stats_proto(", "denoting the value of the custom statistic for the feature. Ex. { FeaturePath(('feature_1',)):", "load_stats_tfrecord(input_path) except Exception: # pylint: disable=broad-except logging.info('File %s did not look like a", "_init_index(self): \"\"\"Initializes internal indices. 
Noop if already initialized.\"\"\" if self._initialized: return field_identifier =", "denoting name of the custom statistic and values denoting the value of the", "type %s, should be a ' 'DatasetFeatureStatistics proto.' % type(stats).__name__) for feature_stats in", "-> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Retrieve the underlying proto.\"\"\" return self._statistics def get_slice(self, slice_key: str) ->", "the keys are feature paths, and values are Dicts with keys denoting name", "statistics_pb2.Histogram, statistics_pb2.RankHistogram]: \"\"\"Get custom statistics from the feature statistics. Args: feature_stats: A FeatureNameStatistics", "dataset statistics. Returns: A FeatureNameStatistics protocol buffer. Raises: TypeError: If the input statistics", "Where possible, accessors should be used in place of proto access (for example,", "import statistics from google.protobuf import text_format from tensorflow_metadata.proto.v0 import statistics_pb2 _NP_DTYPE_KIND_TO_FEATURE_TYPE = {", "(types.FeaturePath.from_proto(cross_feature.path_x), types.FeaturePath.from_proto(cross_feature.path_y)) if feature_id in self._cross_feature_map: raise ValueError('Duplicate feature %s' % feature_id) self._cross_feature_map[feature_id]", "A DatasetListView containing the merged proto. 
\"\"\" if input_path_prefix is None == input_paths", "in self._slice_map: raise ValueError('Duplicate slice name %s' % dataset.name) self._slice_map[dataset.name] = DatasetView(dataset) self._initialized", "i in range(len(source_paths)): if (source_paths[i] != types.FeaturePath.from_proto( feature.validation_derived_source.source_path[i])): all_match = False break if", "access (for example, x.numeric_statistics() instead of x.proto().num_stats) in order to support future extension", "feature_id in self._feature_map: raise ValueError('Duplicate feature %s' % feature_id) self._feature_map[feature_id] = j for", "self._statistics.string_stats.common_stats if which == 'bytes_stats': return self._statistics.bytes_stats.common_stats if which == 'struct_stats': return self._statistics.struct_stats.common_stats", "in stats_iter: for dataset in stats_list.datasets: acc.MergeDatasetFeatureStatistics(dataset.SerializeToString()) stats = statistics_pb2.DatasetFeatureStatisticsList() stats.ParseFromString(acc.Get()) return DatasetListView(stats)", "loads files starting with this prefix and ending with a pattern corresponding to", "elif feature.WhichOneof('field_id') != field_identifier: raise ValueError( 'Features must be specified with either path", "absolute_import from __future__ import division from __future__ import print_function import logging from typing", "input path {}.'.format(input_path)) try: return load_stats_tfrecord(input_path) except Exception: # pylint: disable=broad-except logging.info('File %s", "result = None for stat in self._statistics.custom_stats: if stat.name == name: if result", "from tensorflow_data_validation.utils import io_util from tfx_bsl import statistics from google.protobuf import text_format from", "arrow_util from tensorflow_data_validation.utils import statistics_io_impl from tensorflow_data_validation.utils import io_util from tfx_bsl import statistics", "self._init_index() return self._slice_map.get(slice_key, None) def 
get_default_slice(self) -> Optional['DatasetView']: self._init_index() if len(self._slice_map) == 1:", "return self._statistics.struct_stats.common_stats return None class CrossFeatureView(object): \"\"\"View of a single cross feature.\"\"\" def", "proto.' % type(stats).__name__) stats_proto_text = text_format.MessageToString(stats) io_util.write_string_to_file(output_path, stats_proto_text) def load_stats_text( input_path: Text) ->", "is of type %s, should be a ' 'DatasetFeatureStatisticsList proto.' % type(stats).__name__) stats_proto_text", "A statistics_pb2.FeatureNameStatistics.Type value or None if arrow_type is null (which means it cannot", "paths of files containing sharded DatasetFeatureStatisticsList protos. io_provider: Optional StatisticsIOProvider. If unset, a", "'string_stats': return self._statistics.string_stats.common_stats if which == 'bytes_stats': return self._statistics.bytes_stats.common_stats if which == 'struct_stats':", "file paths of files containing sharded DatasetFeatureStatisticsList protos. io_provider: Optional StatisticsIOProvider. If unset,", "index = self._feature_map.get(feature_id, None) if index is None: return None return FeatureView(self._statistics.features[index]) def", "identifiers.\"\"\" self._init_index() return self._feature_map.keys() def list_cross_features( self) -> Iterable[Tuple[types.FeaturePath, types.FeaturePath]]: \"\"\"Lists cross-feature identifiers.\"\"\"", "statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Get statistics associated with a specific slice. Args: stats: A DatasetFeatureStatisticsList protocol", "except in compliance with the License. # You may obtain a copy of", "sharded DatasetFeatureStatisticsList protos. io_provider: Optional StatisticsIOProvider. 
If unset, a default will be constructed.", "DatasetView(dataset) self._initialized = True def proto(self) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Retrieve the underlying proto.\"\"\" return", "in stat_names: result.custom_stats.add(name=stat_name, num=stats_values[stat_name]) return result def write_stats_text(stats: statistics_pb2.DatasetFeatureStatisticsList, output_path: Text) -> None:", "statistics_io_impl.get_io_provider() if input_path_prefix is not None: input_paths = io_provider.glob(input_path_prefix) acc = statistics.DatasetListAccumulator() stats_iter", "in the dataset statistics. \"\"\" if not isinstance(stats, statistics_pb2.DatasetFeatureStatistics): raise TypeError('statistics is of", "performance becomes an issue. results = [] for feature in self.proto().features: if feature.validation_derived_source", "NLP). def numeric_statistics(self) -> Optional[statistics_pb2.NumericStatistics]: \"\"\"Retrieve numeric statistics if available.\"\"\" if self._statistics.WhichOneof('stats') ==", "}, FeaturePath(('feature_2',)): { 'Mutual Information': 0.8, 'Correlation': 0.6 } } Returns: DatasetFeatureStatistics proto", "binary_mode=True)) return stats_proto def load_stats_tfrecord( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Loads data statistics proto", "def list_cross_features( self) -> Iterable[Tuple[types.FeaturePath, types.FeaturePath]]: \"\"\"Lists cross-feature identifiers.\"\"\" self._init_index() return self._cross_feature_map.keys() def", "with this prefix and ending with a pattern corresponding to the output of", "A DatasetFeatureStatisticsList proto. \"\"\" stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_text = io_util.read_file_to_string(input_path) text_format.Parse(stats_text, stats_proto) return", "to a length one path. Returns: A FeatureView, or None if feature_id is", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "is not None: input_paths = io_provider.glob(input_path_prefix) acc = statistics.DatasetListAccumulator() stats_iter = io_provider.record_iterator_impl(input_paths) for", "the input proto is not of the expected type. \"\"\" if not isinstance(stats,", "-> Optional[statistics_pb2.BytesStatistics]: \"\"\"Retrieve byte statistics if available.\"\"\" if self._statistics.WhichOneof('stats') == 'bytes_stats': return self._statistics.bytes_stats", "CustomStatistic with name=DOMAIN_INFO. DOMAIN_INFO = 'domain_info' # LINT.ThenChange(../anomalies/custom_domain_util.cc) def maybe_get_utf8(value: bytes) -> Optional[Text]:", "as such. Args: feature_id: A types.FeaturePath, Iterable[str] consisting of path steps, or a", "column to be a ' '(Large)List<primitive|struct> or null, but feature {} ' 'was", "the dataset statistics. \"\"\" if not isinstance(stats, statistics_pb2.DatasetFeatureStatistics): raise TypeError('statistics is of type", "if custom_stats.name == custom_stats_name: return getattr(custom_stats, custom_stats.WhichOneof('val')) raise ValueError('Custom statistics %s not found", "\"\"\" result = statistics_pb2.FeatureNameStatistics() result.path.CopyFrom(feature_path.to_proto()) # Sort alphabetically by statistic name to have", "' 'DatasetFeatureStatisticsList proto.' % type(stats).__name__) stats_proto_text = text_format.MessageToString(stats) io_util.write_string_to_file(output_path, stats_proto_text) def load_stats_text( input_path:", "stats_values: A Dict[str,float] where the key of the dict is the name of", "have deterministic ordering stat_names = sorted(stats_values.keys()) for stat_name in stat_names: result.custom_stats.add(name=stat_name, num=stats_values[stat_name]) return", "statistic and the value is the numeric value of the custom statistic of", "of the custom statistics to obtain from the feature statistics proto. 
Returns: The", "statistics for each feature in the dataset. \"\"\" result = statistics_pb2.DatasetFeatureStatistics() # Sort", "= {} # type: Dict[str, DatasetView] self._initialized = False def _init_index(self): \"\"\"Initializes internal", "'Correlation': 0.1 }, FeaturePath(('feature_2',)): { 'Mutual Information': 0.8, 'Correlation': 0.6 } } Returns:", "'Correlation': 0.1 } feature_path: The path of the feature. Returns: A FeatureNameStatistic proto", "a sharded DatasetFeatureStatisticsList from disk as a DatasetListView. Args: input_path_prefix: If passed, loads", "to schema inference using a # CustomStatistic with name=DOMAIN_INFO. DOMAIN_INFO = 'domain_info' #", "self._slice_map[dataset.name] = DatasetView(dataset) self._initialized = True def proto(self) -> statistics_pb2.DatasetFeatureStatisticsList: \"\"\"Retrieve the underlying", "Iterable, Optional, Sequence, Text, Tuple, Union import numpy as np import pyarrow as", "statistics_pb2.FeatureNameStatistics.STRING elif pa.types.is_struct(value_type): return statistics_pb2.FeatureNameStatistics.STRUCT elif pa.types.is_null(value_type): return None raise TypeError('Feature {} has", "unsupported arrow type: {}'.format( feature_path, arrow_type)) def make_dataset_feature_stats_proto( stats_values: Dict[types.FeaturePath, Dict[Text, float]] )", "Optional['CrossFeatureView']: \"\"\"Retrieve a cross-feature if it exists, or None.\"\"\" x_path = _normalize_feature_id(x_path) y_path", "self._initialized = False def _init_index(self): \"\"\"Initializes internal mappings.\"\"\" # Lazily initialize in case", "feature_id = _normalize_feature_id(feature_id) self._init_index() index = self._feature_map.get(feature_id, None) if index is None: return", "statistics_pb2.FeatureNameStatistics.STRING, 'U': statistics_pb2.FeatureNameStatistics.STRING, } # LINT.IfChange # Semantic domain information can be passed", "path {}.'.format(input_path)) try: return load_stats_tfrecord(input_path) except Exception: # pylint: 
disable=broad-except logging.info('File %s did", "'i': statistics_pb2.FeatureNameStatistics.INT, 'u': statistics_pb2.FeatureNameStatistics.INT, 'S': statistics_pb2.FeatureNameStatistics.STRING, 'O': statistics_pb2.FeatureNameStatistics.STRING, 'U': statistics_pb2.FeatureNameStatistics.STRING, } # LINT.IfChange", "name) return result # TODO(b/202910677): Add convenience methods for retrieving first-party custom #", "self._init_index() return self._cross_feature_map.keys() def get_derived_feature( self, deriver_name: str, source_paths: Sequence[types.FeaturePath]) -> Optional['FeatureView']: \"\"\"Retrieve", "the custom statistic and values denoting the value of the custom statistic for", "using a # CustomStatistic with name=DOMAIN_INFO. DOMAIN_INFO = 'domain_info' # LINT.ThenChange(../anomalies/custom_domain_util.cc) def maybe_get_utf8(value:", "statistics_pb2.RankHistogram]: \"\"\"Get custom statistics from the feature statistics. Args: feature_stats: A FeatureNameStatistics protocol", "return result def write_stats_text(stats: statistics_pb2.DatasetFeatureStatisticsList, output_path: Text) -> None: \"\"\"Writes a DatasetFeatureStatisticsList proto", "length one path. 
Returns: A FeatureView, or None if feature_id is not present.", "custom_statistic by name.\"\"\" result = None for stat in self._statistics.custom_stats: if stat.name ==", "'u': statistics_pb2.FeatureNameStatistics.INT, 'S': statistics_pb2.FeatureNameStatistics.STRING, 'O': statistics_pb2.FeatureNameStatistics.STRING, 'U': statistics_pb2.FeatureNameStatistics.STRING, } # LINT.IfChange # Semantic", "None, io_provider: Optional[statistics_io_impl.StatisticsIOProvider] = None ) -> DatasetListView: \"\"\"Read a sharded DatasetFeatureStatisticsList from", "\"\"\"Initializes internal mappings.\"\"\" # Lazily initialize in case we don't need an index.", "feature in self.proto().features: if feature.validation_derived_source is None: continue if feature.validation_derived_source.deriver_name != deriver_name: continue", "if not arrow_util.is_list_like(arrow_type): raise TypeError('Expected feature column to be a ' '(Large)List<primitive|struct> or", "a derived feature based on a deriver name and its inputs. Args: deriver_name:", "class provides accessor methods, as well as access to the underlying proto. Where", "np.dtype) -> Optional[types.FeatureNameStatisticsType]: \"\"\"Get feature type from numpy dtype. Args: dtype: Numpy dtype.", "Optional[statistics_io_impl.StatisticsIOProvider] = None ) -> DatasetListView: \"\"\"Read a sharded DatasetFeatureStatisticsList from disk as", "one of input_paths_prefix, input_paths.') if io_provider is None: io_provider = statistics_io_impl.get_io_provider() if input_path_prefix", "def get_custom_stats( feature_stats: statistics_pb2.FeatureNameStatistics, custom_stats_name: Text ) -> Union[float, Text, statistics_pb2.Histogram, statistics_pb2.RankHistogram]: \"\"\"Get", "FeatureNameStatistic proto containing the custom statistics for a feature. 
\"\"\" result = statistics_pb2.FeatureNameStatistics()", "enumerate(self._statistics.cross_features): feature_id = (types.FeaturePath.from_proto(cross_feature.path_x), types.FeaturePath.from_proto(cross_feature.path_y)) if feature_id in self._cross_feature_map: raise ValueError('Duplicate feature %s'", "it cannot be determined for now). Raises: TypeError: if the type is not", "self._statistics.WhichOneof('stats') == 'num_stats': return self._statistics.num_stats return None def string_statistics(self) -> Optional[statistics_pb2.StringStatistics]: \"\"\"Retrieve string", "Text ) -> Union[float, Text, statistics_pb2.Histogram, statistics_pb2.RankHistogram]: \"\"\"Get custom statistics from the feature", "Union[str, types.FeaturePath, Iterable[str]] ) -> types.FeaturePath: if isinstance(name_or_path_or_steps, str): return types.FeaturePath([name_or_path_or_steps]) if isinstance(name_or_path_or_steps,", "path to write the DatasetFeatureStatisticsList proto. Raises: TypeError: If the input proto is", "not found in the feature statistics.' % custom_stats_name) def get_slice_stats( stats: statistics_pb2.DatasetFeatureStatisticsList, slice_key:", "return CrossFeatureView(self._statistics.cross_features[index]) def list_features(self) -> Iterable[types.FeaturePath]: \"\"\"Lists feature identifiers.\"\"\" self._init_index() return self._feature_map.keys() def" ]
[ "pd.set_option('display.max_colwidth', -1) return names_n_url if __name__ == \"__main__\": # if running as script,", "mars_df.set_index('Description', inplace=True) # set column index # Convert dataframe into HTML format, add", "soupy(parse_html, 'html.parser' ) try: # find the relative image url latest_image_full = full_img_soup.select_one('figure.lede", "browser.html hemi_parse_html = soupy(parse_html, 'html.parser' ) hemi_img_url = hemi_parse_html.select_one('ul li a').get(\"href\") names_n_url.append({Hemisphere:hemis_search_list[x],Urlid:hemi_img_url}) except", "Assign columns and set index of dataframe mars_df.columns = ['Description', 'Mars'] # adds", "# -------------------------------------------------------------------------------------------------------------------------------- # Gathered Data # -------------------------------------------------------------------------------------------------------------------------------- def scrape_all(): # Set the executable", "element to find the first a tag and save it as `news_title` news_title", "slide_elem.find('div',class_='article_teaser_body').get_text() except AttributeError: return None, None # return news_title, news_teaser_sum, news_date, latest_art_link return", "= \"Hemisphere\" Urlid = \"URL\" for x in range(len(hemis_search_list)): url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' browser.visit(url)", "= hemi_parse_html.select_one('ul li a').get(\"href\") names_n_url.append({Hemisphere:hemis_search_list[x],Urlid:hemi_img_url}) except IndexError: return f\"Search result not found\" except", "BeautifulSoup as soupy import pandas as pd import datetime as dt # --------------------------------------------------------------------------------------------------------------------------------", "f\"Search result not found\" except AttributeError: return None # df_hemi_urls = pd.DataFrame.from_dict(names_n_url, orient='columns')", "Browser from bs4 import BeautifulSoup 
as soupy import pandas as pd import datetime", "quit the browser parse_html = browser.html news_soup = soupy(parse_html, 'html.parser') try: # add", "= slide_elem.find('div',class_='content_title').get_text() # news_date = slide_elem.find('div',class_='list_date').get_text() # latest_art_link = f\"https://mars.nasa.gov{slide_elem.select_one('ul li a').get('href')}\" #", "li.slide\", wait_time=1) # Convert the browser html to a soup object and then", "# -------------------------------------------------------------------------------------------------------------------------------- # Imports and Executables # -------------------------------------------------------------------------------------------------------------------------------- from splinter import Browser from", "# -------------------------------------------------------------------------------------------------------------------------------- def mars_facts(): try: mars_df = pd.read_html('https://space-facts.com/mars/')[0] except BaseException: # covers all", "to get the function started, like a grandfather variable # browser function already", "= browser.html hemi_parse_html = soupy(parse_html, 'html.parser' ) hemi_img_url = hemi_parse_html.select_one('ul li a').get(\"href\") names_n_url.append({Hemisphere:hemis_search_list[x],Urlid:hemi_img_url})", "-------------------------------------------------------------------------------------------------------------------------------- # Mars Hemispheres # -------------------------------------------------------------------------------------------------------------------------------- def get_url(browser): hemis_search_list = ['Cerberus Hemisphere Enhanced',", "scraping functions and stores results in a dictionary mars_total_data = { \"news_title\" :", "import pandas as pd import datetime as dt # -------------------------------------------------------------------------------------------------------------------------------- # Gathered Data", "None # return news_title, 
news_teaser_sum, news_date, latest_art_link return news_title, news_teaser_sum # -------------------------------------------------------------------------------------------------------------------------------- #", "latest_imgurl = f\"https://www.jpl.nasa.gov{latest_image_full}\" return latest_imgurl # -------------------------------------------------------------------------------------------------------------------------------- # Mars Fact Table # --------------------------------------------------------------------------------------------------------------------------------", "x in range(len(hemis_search_list)): url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' browser.visit(url) try: browser.is_element_present_by_text((f'{hemis_search_list[x]}'), wait_time=2) hemi_click = browser.links.find_by_partial_text(f'{hemis_search_list[x]}')", "loading page browser.is_element_present_by_css(\"ul.item_list li.slide\", wait_time=1) # Convert the browser html to a soup", "image url latest_image_full = full_img_soup.select_one('figure.lede a img').get(\"src\") except AttributeError: return None # Use", "the function, basically a catalyst to get the function started, like a grandfather", "try: mars_df = pd.read_html('https://space-facts.com/mars/')[0] except BaseException: # covers all exception errors return None", "mars_df.columns = ['Description', 'Mars'] # adds column names mars_df.set_index('Description', inplace=True) # set column", "format, add bootstrap return mars_df.to_html(classes= \"table\") # -------------------------------------------------------------------------------------------------------------------------------- # Mars Hemispheres # --------------------------------------------------------------------------------------------------------------------------------", "and Paragraph # -------------------------------------------------------------------------------------------------------------------------------- def 
mars_news(browser): # defined outside of the function, basically", "# -------------------------------------------------------------------------------------------------------------------------------- from splinter import Browser from bs4 import BeautifulSoup as soupy import", "# df_hemi_urls['URL']=str(df_hemi_urls['URL']) # pd.set_option('display.max_colwidth', -1) return names_n_url if __name__ == \"__main__\": # if", "\"last_modified\" : dt.datetime.now()} browser.quit() return mars_total_data # -------------------------------------------------------------------------------------------------------------------------------- # News Title and Paragraph", "dt.datetime.now()} browser.quit() return mars_total_data # -------------------------------------------------------------------------------------------------------------------------------- # News Title and Paragraph # --------------------------------------------------------------------------------------------------------------------------------", "= True, doesnt show automated script in action # pylint: disable=unbalanced-tuple-unpacking # news_title,", "with none returned slide_elem = news_soup.select_one('ul.item_list li.slide') # parent element, holds other elements", "# Use the parent element to find the first a tag and save", "Hemisphere Enhanced', 'Schiaparelli Hemisphere Enhanced', 'Syrtis Major Hemisphere Enhanced', 'Valles Marineris Hemisphere Enhanced']", "# Mars Hemispheres # -------------------------------------------------------------------------------------------------------------------------------- def get_url(browser): hemis_search_list = ['Cerberus Hemisphere Enhanced', 'Schiaparelli", "mars_news(browser): # defined outside of the function, basically a catalyst to get the", "{ \"news_title\" : news_title, \"news_paragraph_summary\" : news_teaser_sum, # \"news_latest_date\" : news_date, # \"news_latest_link\"", "wait_time=1) # Convert the browser html to a soup object and then quit", "import Browser 
from bs4 import BeautifulSoup as soupy import pandas as pd import", "pandas as pd import datetime as dt # -------------------------------------------------------------------------------------------------------------------------------- # Gathered Data #", "except AttributeError: return None, None # return news_title, news_teaser_sum, news_date, latest_art_link return news_title,", "defined outside of the function, basically a catalyst to get the function started,", "Use the parent element to find the first a tag and save it", "Use the parent element to find the paragraph text news_teaser_sum = slide_elem.find('div',class_='article_teaser_body').get_text() except", "df_hemi_urls.set_index('Hemisphere', inplace=True) # df_hemi_urls['URL']=str(df_hemi_urls['URL']) # pd.set_option('display.max_colwidth', -1) return names_n_url if __name__ == \"__main__\":", "`news_title` news_title = slide_elem.find('div',class_='content_title').get_text() # news_date = slide_elem.find('div',class_='list_date').get_text() # latest_art_link = f\"https://mars.nasa.gov{slide_elem.select_one('ul li", "a catalyst to get the function started, like a grandfather variable # browser", "click that browser.is_element_present_by_text('more info', wait_time=1) more_info_elem = browser.links.find_by_partial_text('more info') more_info_elem.click() # Parse the", "result not found\" except AttributeError: return None # df_hemi_urls = pd.DataFrame.from_dict(names_n_url, orient='columns') #", "like a grandfather variable # browser function already defined outside # Visit the", "a').get('href')}\" # Use the parent element to find the paragraph text news_teaser_sum =", "browser in splinter browser = Browser('chrome', **{'executable_path':'chromedriver'}, headless=True) # headless = True, doesnt", "Hemispheres # -------------------------------------------------------------------------------------------------------------------------------- def get_url(browser): hemis_search_list = ['Cerberus Hemisphere 
Enhanced', 'Schiaparelli Hemisphere Enhanced',", "more info button and click that browser.is_element_present_by_text('more info', wait_time=1) more_info_elem = browser.links.find_by_partial_text('more info')", "of the function, basically a catalyst to get the function started, like a", "soupy import pandas as pd import datetime as dt # -------------------------------------------------------------------------------------------------------------------------------- # Gathered", "= pd.DataFrame.from_dict(names_n_url, orient='columns') # df_hemi_urls.set_index('Hemisphere', inplace=True) # df_hemi_urls['URL']=str(df_hemi_urls['URL']) # pd.set_option('display.max_colwidth', -1) return names_n_url", "Paragraph # -------------------------------------------------------------------------------------------------------------------------------- def mars_news(browser): # defined outside of the function, basically a", "\"URL\" for x in range(len(hemis_search_list)): url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' browser.visit(url) try: browser.is_element_present_by_text((f'{hemis_search_list[x]}'), wait_time=2) hemi_click", "in action # pylint: disable=unbalanced-tuple-unpacking # news_title, news_teaser_sum, news_date = mars_news(browser) news_title, news_teaser_sum", "slide_elem = news_soup.select_one('ul.item_list li.slide') # parent element, holds other elements to furthur filter", "'html.parser') try: # add error handling, espescially for AttributeErros with try/except # if", "\"news_latest_date\" : news_date, # \"news_latest_link\" : latest_art_link, \"featured_image\" : featured_image(browser), \"facts\" : mars_facts(),", "# News Title and Paragraph # -------------------------------------------------------------------------------------------------------------------------------- def mars_news(browser): # defined outside of", "import BeautifulSoup as soupy import pandas as pd import datetime as dt #", "'Mars'] # adds column names 
mars_df.set_index('Description', inplace=True) # set column index # Convert", "Browser('chrome', **{'executable_path':'chromedriver'}, headless=True) # headless = True, doesnt show automated script in action", "# df_hemi_urls = pd.DataFrame.from_dict(names_n_url, orient='columns') # df_hemi_urls.set_index('Hemisphere', inplace=True) # df_hemi_urls['URL']=str(df_hemi_urls['URL']) # pd.set_option('display.max_colwidth', -1)", "it will stop when its AttributeError with none returned slide_elem = news_soup.select_one('ul.item_list li.slide')", "= f\"https://www.jpl.nasa.gov{latest_image_full}\" return latest_imgurl # -------------------------------------------------------------------------------------------------------------------------------- # Mars Fact Table # -------------------------------------------------------------------------------------------------------------------------------- def", "parse_html = browser.html news_soup = soupy(parse_html, 'html.parser') try: # add error handling, espescially", "# Visit the mars nasa news site nasa_url = 'https://mars.nasa.gov/news/' browser.visit(nasa_url) # optional", "soupy(parse_html, 'html.parser' ) hemi_img_url = hemi_parse_html.select_one('ul li a').get(\"href\") names_n_url.append({Hemisphere:hemis_search_list[x],Urlid:hemi_img_url}) except IndexError: return f\"Search", "index of dataframe mars_df.columns = ['Description', 'Mars'] # adds column names mars_df.set_index('Description', inplace=True)", "hemis_search_list = ['Cerberus Hemisphere Enhanced', 'Schiaparelli Hemisphere Enhanced', 'Syrtis Major Hemisphere Enhanced', 'Valles", "Title and Paragraph # -------------------------------------------------------------------------------------------------------------------------------- def mars_news(browser): # defined outside of the function,", "url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' browser.visit(url) try: 
browser.is_element_present_by_text((f'{hemis_search_list[x]}'), wait_time=2) hemi_click = browser.links.find_by_partial_text(f'{hemis_search_list[x]}') hemi_click.click() parse_html =", "a tag and save it as `news_title` news_title = slide_elem.find('div',class_='content_title').get_text() # news_date =", "hemi_img_url = hemi_parse_html.select_one('ul li a').get(\"href\") names_n_url.append({Hemisphere:hemis_search_list[x],Urlid:hemi_img_url}) except IndexError: return f\"Search result not found\"", "def get_url(browser): hemis_search_list = ['Cerberus Hemisphere Enhanced', 'Schiaparelli Hemisphere Enhanced', 'Syrtis Major Hemisphere", "# -------------------------------------------------------------------------------------------------------------------------------- # Mars Fact Table # -------------------------------------------------------------------------------------------------------------------------------- def mars_facts(): try: mars_df =", "not found\" except AttributeError: return None # df_hemi_urls = pd.DataFrame.from_dict(names_n_url, orient='columns') # df_hemi_urls.set_index('Hemisphere',", "element to find the paragraph text news_teaser_sum = slide_elem.find('div',class_='article_teaser_body').get_text() except AttributeError: return None,", "and set index of dataframe mars_df.columns = ['Description', 'Mars'] # adds column names", "the chrome browser in splinter browser = Browser('chrome', **{'executable_path':'chromedriver'}, headless=True) # headless =", "None # Use the base url to create an absolute url latest_imgurl =", "the browser parse_html = browser.html news_soup = soupy(parse_html, 'html.parser') try: # add error", "found\" except AttributeError: return None # df_hemi_urls = pd.DataFrame.from_dict(names_n_url, orient='columns') # df_hemi_urls.set_index('Hemisphere', inplace=True)", "'Schiaparelli Hemisphere Enhanced', 'Syrtis Major Hemisphere Enhanced', 'Valles Marineris Hemisphere Enhanced'] names_n_url =", "try: # add error handling, 
espescially for AttributeErros with try/except # if error,", "# defined outside of the function, basically a catalyst to get the function", "nasa news site nasa_url = 'https://mars.nasa.gov/news/' browser.visit(nasa_url) # optional delay for loading page", "browser html to a soup object and then quit the browser parse_html =", "= ['Cerberus Hemisphere Enhanced', 'Schiaparelli Hemisphere Enhanced', 'Syrtis Major Hemisphere Enhanced', 'Valles Marineris", "= news_soup.select_one('ul.item_list li.slide') # parent element, holds other elements to furthur filter #", "\"Hemisphere\" Urlid = \"URL\" for x in range(len(hemis_search_list)): url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' browser.visit(url) try:", "news_teaser_sum = slide_elem.find('div',class_='article_teaser_body').get_text() except AttributeError: return None, None # return news_title, news_teaser_sum, news_date,", "# return news_title, news_teaser_sum, news_date, latest_art_link return news_title, news_teaser_sum # -------------------------------------------------------------------------------------------------------------------------------- # JPL", "HTML format, add bootstrap return mars_df.to_html(classes= \"table\") # -------------------------------------------------------------------------------------------------------------------------------- # Mars Hemispheres #", "Use the base url to create an absolute url latest_imgurl = f\"https://www.jpl.nasa.gov{latest_image_full}\" return", "li.slide') # parent element, holds other elements to furthur filter # Use the", "'Valles Marineris Hemisphere Enhanced'] names_n_url = [] Hemisphere = \"Hemisphere\" Urlid = \"URL\"", "# find the relative image url latest_image_full = full_img_soup.select_one('figure.lede a img').get(\"src\") except AttributeError:", "# latest_art_link = f\"https://mars.nasa.gov{slide_elem.select_one('ul li a').get('href')}\" # Use the parent element to find", ": dt.datetime.now()} 
browser.quit() return mars_total_data # -------------------------------------------------------------------------------------------------------------------------------- # News Title and Paragraph #", "if __name__ == \"__main__\": # if running as script, print scraped data print(scrape_all())", "its AttributeError with none returned slide_elem = news_soup.select_one('ul.item_list li.slide') # parent element, holds", "return news_title, news_teaser_sum, news_date, latest_art_link return news_title, news_teaser_sum # -------------------------------------------------------------------------------------------------------------------------------- # JPL Featured", "Hemisphere = \"Hemisphere\" Urlid = \"URL\" for x in range(len(hemis_search_list)): url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'", ": mars_facts(), \"img_and_url\": get_url(browser), \"last_modified\" : dt.datetime.now()} browser.quit() return mars_total_data # -------------------------------------------------------------------------------------------------------------------------------- #", "df_hemi_urls = pd.DataFrame.from_dict(names_n_url, orient='columns') # df_hemi_urls.set_index('Hemisphere', inplace=True) # df_hemi_urls['URL']=str(df_hemi_urls['URL']) # pd.set_option('display.max_colwidth', -1) return", "image button full_image_elem = browser.find_by_id('full_image') full_image_elem.click() # Find the more info button and", "\"facts\" : mars_facts(), \"img_and_url\": get_url(browser), \"last_modified\" : dt.datetime.now()} browser.quit() return mars_total_data # --------------------------------------------------------------------------------------------------------------------------------", "the resulting html with soup parse_html = browser.html full_img_soup = soupy(parse_html, 'html.parser' )", "[] Hemisphere = \"Hemisphere\" Urlid = \"URL\" for x in range(len(hemis_search_list)): url =", "html with soup parse_html = browser.html full_img_soup = 
soupy(parse_html, 'html.parser' ) try: #", "doesnt show automated script in action # pylint: disable=unbalanced-tuple-unpacking # news_title, news_teaser_sum, news_date", "news_title, news_teaser_sum, news_date, latest_art_link return news_title, news_teaser_sum # -------------------------------------------------------------------------------------------------------------------------------- # JPL Featured Space", "URL def featured_image(browser): url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url) # Find and click the full", "None, None # return news_title, news_teaser_sum, news_date, latest_art_link return news_title, news_teaser_sum # --------------------------------------------------------------------------------------------------------------------------------", "# Convert dataframe into HTML format, add bootstrap return mars_df.to_html(classes= \"table\") # --------------------------------------------------------------------------------------------------------------------------------", "for x in range(len(hemis_search_list)): url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' browser.visit(url) try: browser.is_element_present_by_text((f'{hemis_search_list[x]}'), wait_time=2) hemi_click =", "full_image_elem.click() # Find the more info button and click that browser.is_element_present_by_text('more info', wait_time=1)", "browser.html news_soup = soupy(parse_html, 'html.parser') try: # add error handling, espescially for AttributeErros", "parent element to find the paragraph text news_teaser_sum = slide_elem.find('div',class_='article_teaser_body').get_text() except AttributeError: return", "= f\"https://mars.nasa.gov{slide_elem.select_one('ul li a').get('href')}\" # Use the parent element to find the paragraph", "show automated script in action # pylint: disable=unbalanced-tuple-unpacking # news_title, news_teaser_sum, news_date =", "url to create an absolute url latest_imgurl = 
f\"https://www.jpl.nasa.gov{latest_image_full}\" return latest_imgurl # --------------------------------------------------------------------------------------------------------------------------------", "and then quit the browser parse_html = browser.html news_soup = soupy(parse_html, 'html.parser') try:", "# Visit URL def featured_image(browser): url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url) # Find and click", "# set column index # Convert dataframe into HTML format, add bootstrap return", "pd.DataFrame.from_dict(names_n_url, orient='columns') # df_hemi_urls.set_index('Hemisphere', inplace=True) # df_hemi_urls['URL']=str(df_hemi_urls['URL']) # pd.set_option('display.max_colwidth', -1) return names_n_url if", "# pylint: disable=unbalanced-tuple-unpacking # news_title, news_teaser_sum, news_date = mars_news(browser) news_title, news_teaser_sum = mars_news(browser)", "object and then quit the browser parse_html = browser.html news_soup = soupy(parse_html, 'html.parser')", "for AttributeErros with try/except # if error, code will keep running, except it", "# headless = True, doesnt show automated script in action # pylint: disable=unbalanced-tuple-unpacking", "add error handling, espescially for AttributeErros with try/except # if error, code will", "news_teaser_sum # -------------------------------------------------------------------------------------------------------------------------------- # JPL Featured Space Image # -------------------------------------------------------------------------------------------------------------------------------- # Visit URL", "the executable path and initialize the chrome browser in splinter browser = Browser('chrome',", "outside # Visit the mars nasa news site nasa_url = 'https://mars.nasa.gov/news/' browser.visit(nasa_url) #", "more_info_elem.click() # Parse the resulting html with soup parse_html = browser.html full_img_soup =", "separate scraping functions and stores results in a 
dictionary mars_total_data = { \"news_title\"", "# -------------------------------------------------------------------------------------------------------------------------------- def scrape_all(): # Set the executable path and initialize the chrome", "pd import datetime as dt # -------------------------------------------------------------------------------------------------------------------------------- # Gathered Data # -------------------------------------------------------------------------------------------------------------------------------- def", "as dt # -------------------------------------------------------------------------------------------------------------------------------- # Gathered Data # -------------------------------------------------------------------------------------------------------------------------------- def scrape_all(): # Set", "browser.visit(url) # Find and click the full image button full_image_elem = browser.find_by_id('full_image') full_image_elem.click()", "latest_art_link, \"featured_image\" : featured_image(browser), \"facts\" : mars_facts(), \"img_and_url\": get_url(browser), \"last_modified\" : dt.datetime.now()} browser.quit()", "Set the executable path and initialize the chrome browser in splinter browser =", "resulting html with soup parse_html = browser.html full_img_soup = soupy(parse_html, 'html.parser' ) try:", "headless=True) # headless = True, doesnt show automated script in action # pylint:", "parent element to find the first a tag and save it as `news_title`", "the parent element to find the paragraph text news_teaser_sum = slide_elem.find('div',class_='article_teaser_body').get_text() except AttributeError:", "try: # find the relative image url latest_image_full = full_img_soup.select_one('figure.lede a img').get(\"src\") except", "li a').get('href')}\" # Use the parent element to find the paragraph text news_teaser_sum", "import datetime as dt # 
-------------------------------------------------------------------------------------------------------------------------------- # Gathered Data # -------------------------------------------------------------------------------------------------------------------------------- def scrape_all():", "# df_hemi_urls.set_index('Hemisphere', inplace=True) # df_hemi_urls['URL']=str(df_hemi_urls['URL']) # pd.set_option('display.max_colwidth', -1) return names_n_url if __name__ ==", "= { \"news_title\" : news_title, \"news_paragraph_summary\" : news_teaser_sum, # \"news_latest_date\" : news_date, #", "news_soup.select_one('ul.item_list li.slide') # parent element, holds other elements to furthur filter # Use", "Convert dataframe into HTML format, add bootstrap return mars_df.to_html(classes= \"table\") # -------------------------------------------------------------------------------------------------------------------------------- #", "featured_image(browser), \"facts\" : mars_facts(), \"img_and_url\": get_url(browser), \"last_modified\" : dt.datetime.now()} browser.quit() return mars_total_data #", "url latest_imgurl = f\"https://www.jpl.nasa.gov{latest_image_full}\" return latest_imgurl # -------------------------------------------------------------------------------------------------------------------------------- # Mars Fact Table #", "bootstrap return mars_df.to_html(classes= \"table\") # -------------------------------------------------------------------------------------------------------------------------------- # Mars Hemispheres # -------------------------------------------------------------------------------------------------------------------------------- def get_url(browser):", "browser = Browser('chrome', **{'executable_path':'chromedriver'}, headless=True) # headless = True, doesnt show automated script", "AttributeError: return None # Use the base url to create an absolute url", "mars_df = pd.read_html('https://space-facts.com/mars/')[0] except BaseException: 
# covers all exception errors return None #", "\"news_title\" : news_title, \"news_paragraph_summary\" : news_teaser_sum, # \"news_latest_date\" : news_date, # \"news_latest_link\" :", "full_image_elem = browser.find_by_id('full_image') full_image_elem.click() # Find the more info button and click that", "return names_n_url if __name__ == \"__main__\": # if running as script, print scraped", "['Description', 'Mars'] # adds column names mars_df.set_index('Description', inplace=True) # set column index #", "= 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url) # Find and click the full image button full_image_elem =", "browser.is_element_present_by_text((f'{hemis_search_list[x]}'), wait_time=2) hemi_click = browser.links.find_by_partial_text(f'{hemis_search_list[x]}') hemi_click.click() parse_html = browser.html hemi_parse_html = soupy(parse_html, 'html.parser'", "function, basically a catalyst to get the function started, like a grandfather variable", "-------------------------------------------------------------------------------------------------------------------------------- def mars_news(browser): # defined outside of the function, basically a catalyst to", "= Browser('chrome', **{'executable_path':'chromedriver'}, headless=True) # headless = True, doesnt show automated script in", "latest_art_link return news_title, news_teaser_sum # -------------------------------------------------------------------------------------------------------------------------------- # JPL Featured Space Image # --------------------------------------------------------------------------------------------------------------------------------", "return latest_imgurl # -------------------------------------------------------------------------------------------------------------------------------- # Mars Fact Table # -------------------------------------------------------------------------------------------------------------------------------- def mars_facts(): 
try:", "in splinter browser = Browser('chrome', **{'executable_path':'chromedriver'}, headless=True) # headless = True, doesnt show", "'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url) # Find and click the full image button full_image_elem = browser.find_by_id('full_image')", "inplace=True) # df_hemi_urls['URL']=str(df_hemi_urls['URL']) # pd.set_option('display.max_colwidth', -1) return names_n_url if __name__ == \"__main__\": #", "filter # Use the parent element to find the first a tag and", "browser.html full_img_soup = soupy(parse_html, 'html.parser' ) try: # find the relative image url", "action # pylint: disable=unbalanced-tuple-unpacking # news_title, news_teaser_sum, news_date = mars_news(browser) news_title, news_teaser_sum =", "'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' browser.visit(url) try: browser.is_element_present_by_text((f'{hemis_search_list[x]}'), wait_time=2) hemi_click = browser.links.find_by_partial_text(f'{hemis_search_list[x]}') hemi_click.click() parse_html = browser.html hemi_parse_html", "pylint: disable=unbalanced-tuple-unpacking # news_title, news_teaser_sum, news_date = mars_news(browser) news_title, news_teaser_sum = mars_news(browser) #", "optional delay for loading page browser.is_element_present_by_css(\"ul.item_list li.slide\", wait_time=1) # Convert the browser html", "pd.read_html('https://space-facts.com/mars/')[0] except BaseException: # covers all exception errors return None # Assign columns", "Marineris Hemisphere Enhanced'] names_n_url = [] Hemisphere = \"Hemisphere\" Urlid = \"URL\" for", "function already defined outside # Visit the mars nasa news site nasa_url =", "first a tag and save it as `news_title` news_title = slide_elem.find('div',class_='content_title').get_text() # news_date", "a').get(\"href\") names_n_url.append({Hemisphere:hemis_search_list[x],Urlid:hemi_img_url}) except IndexError: return f\"Search result not found\" except 
AttributeError: return None", "= browser.find_by_id('full_image') full_image_elem.click() # Find the more info button and click that browser.is_element_present_by_text('more", "news_date, latest_art_link return news_title, news_teaser_sum # -------------------------------------------------------------------------------------------------------------------------------- # JPL Featured Space Image #", "Convert the browser html to a soup object and then quit the browser", "f\"https://www.jpl.nasa.gov{latest_image_full}\" return latest_imgurl # -------------------------------------------------------------------------------------------------------------------------------- # Mars Fact Table # -------------------------------------------------------------------------------------------------------------------------------- def mars_facts():", "-------------------------------------------------------------------------------------------------------------------------------- # News Title and Paragraph # -------------------------------------------------------------------------------------------------------------------------------- def mars_news(browser): # defined outside", "return mars_df.to_html(classes= \"table\") # -------------------------------------------------------------------------------------------------------------------------------- # Mars Hemispheres # -------------------------------------------------------------------------------------------------------------------------------- def get_url(browser): hemis_search_list", "results in a dictionary mars_total_data = { \"news_title\" : news_title, \"news_paragraph_summary\" : news_teaser_sum,", "# -------------------------------------------------------------------------------------------------------------------------------- # JPL Featured Space Image # -------------------------------------------------------------------------------------------------------------------------------- # Visit URL def", "keep running, except it 
will stop when its AttributeError with none returned slide_elem", "# covers all exception errors return None # Assign columns and set index", "= \"URL\" for x in range(len(hemis_search_list)): url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' browser.visit(url) try: browser.is_element_present_by_text((f'{hemis_search_list[x]}'), wait_time=2)", "def mars_facts(): try: mars_df = pd.read_html('https://space-facts.com/mars/')[0] except BaseException: # covers all exception errors", "names_n_url = [] Hemisphere = \"Hemisphere\" Urlid = \"URL\" for x in range(len(hemis_search_list)):", "# adds column names mars_df.set_index('Description', inplace=True) # set column index # Convert dataframe", "# Set the executable path and initialize the chrome browser in splinter browser", "handling, espescially for AttributeErros with try/except # if error, code will keep running,", "Featured Space Image # -------------------------------------------------------------------------------------------------------------------------------- # Visit URL def featured_image(browser): url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'", "# Use the base url to create an absolute url latest_imgurl = f\"https://www.jpl.nasa.gov{latest_image_full}\"", "find the relative image url latest_image_full = full_img_soup.select_one('figure.lede a img').get(\"src\") except AttributeError: return", "button and click that browser.is_element_present_by_text('more info', wait_time=1) more_info_elem = browser.links.find_by_partial_text('more info') more_info_elem.click() #", "= full_img_soup.select_one('figure.lede a img').get(\"src\") except AttributeError: return None # Use the base url", "-------------------------------------------------------------------------------------------------------------------------------- def mars_facts(): try: mars_df = pd.read_html('https://space-facts.com/mars/')[0] except BaseException: # covers all exception", "= 
browser.links.find_by_partial_text('more info') more_info_elem.click() # Parse the resulting html with soup parse_html =", "column names mars_df.set_index('Description', inplace=True) # set column index # Convert dataframe into HTML", "will keep running, except it will stop when its AttributeError with none returned", "return None # Assign columns and set index of dataframe mars_df.columns = ['Description',", "the full image button full_image_elem = browser.find_by_id('full_image') full_image_elem.click() # Find the more info", "dataframe into HTML format, add bootstrap return mars_df.to_html(classes= \"table\") # -------------------------------------------------------------------------------------------------------------------------------- # Mars", "find the first a tag and save it as `news_title` news_title = slide_elem.find('div',class_='content_title').get_text()", ": news_teaser_sum, # \"news_latest_date\" : news_date, # \"news_latest_link\" : latest_art_link, \"featured_image\" : featured_image(browser),", "# browser function already defined outside # Visit the mars nasa news site", "to create an absolute url latest_imgurl = f\"https://www.jpl.nasa.gov{latest_image_full}\" return latest_imgurl # -------------------------------------------------------------------------------------------------------------------------------- #", "Major Hemisphere Enhanced', 'Valles Marineris Hemisphere Enhanced'] names_n_url = [] Hemisphere = \"Hemisphere\"", "Urlid = \"URL\" for x in range(len(hemis_search_list)): url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' browser.visit(url) try: browser.is_element_present_by_text((f'{hemis_search_list[x]}'),", "none returned slide_elem = news_soup.select_one('ul.item_list li.slide') # parent element, holds other elements to", "nasa_url = 'https://mars.nasa.gov/news/' browser.visit(nasa_url) # optional delay for loading page browser.is_element_present_by_css(\"ul.item_list li.slide\", 
wait_time=1)", "catalyst to get the function started, like a grandfather variable # browser function", "browser.quit() return mars_total_data # -------------------------------------------------------------------------------------------------------------------------------- # News Title and Paragraph # -------------------------------------------------------------------------------------------------------------------------------- def", "Gathered Data # -------------------------------------------------------------------------------------------------------------------------------- def scrape_all(): # Set the executable path and initialize", "and click that browser.is_element_present_by_text('more info', wait_time=1) more_info_elem = browser.links.find_by_partial_text('more info') more_info_elem.click() # Parse", "grandfather variable # browser function already defined outside # Visit the mars nasa", "if error, code will keep running, except it will stop when its AttributeError", "splinter import Browser from bs4 import BeautifulSoup as soupy import pandas as pd", "path and initialize the chrome browser in splinter browser = Browser('chrome', **{'executable_path':'chromedriver'}, headless=True)", "news_date, # \"news_latest_link\" : latest_art_link, \"featured_image\" : featured_image(browser), \"facts\" : mars_facts(), \"img_and_url\": get_url(browser),", "holds other elements to furthur filter # Use the parent element to find", "except BaseException: # covers all exception errors return None # Assign columns and", "then quit the browser parse_html = browser.html news_soup = soupy(parse_html, 'html.parser') try: #", "info', wait_time=1) more_info_elem = browser.links.find_by_partial_text('more info') more_info_elem.click() # Parse the resulting html with", "and stores results in a dictionary mars_total_data = { \"news_title\" : news_title, \"news_paragraph_summary\"", "dt # 
-------------------------------------------------------------------------------------------------------------------------------- # Gathered Data # -------------------------------------------------------------------------------------------------------------------------------- def scrape_all(): # Set the", "Enhanced', 'Syrtis Major Hemisphere Enhanced', 'Valles Marineris Hemisphere Enhanced'] names_n_url = [] Hemisphere", "\"featured_image\" : featured_image(browser), \"facts\" : mars_facts(), \"img_and_url\": get_url(browser), \"last_modified\" : dt.datetime.now()} browser.quit() return", ": news_title, \"news_paragraph_summary\" : news_teaser_sum, # \"news_latest_date\" : news_date, # \"news_latest_link\" : latest_art_link,", "index # Convert dataframe into HTML format, add bootstrap return mars_df.to_html(classes= \"table\") #", "and initialize the chrome browser in splinter browser = Browser('chrome', **{'executable_path':'chromedriver'}, headless=True) #", "covers all exception errors return None # Assign columns and set index of", "mars_df.to_html(classes= \"table\") # -------------------------------------------------------------------------------------------------------------------------------- # Mars Hemispheres # -------------------------------------------------------------------------------------------------------------------------------- def get_url(browser): hemis_search_list =", "defined outside # Visit the mars nasa news site nasa_url = 'https://mars.nasa.gov/news/' browser.visit(nasa_url)", "all exception errors return None # Assign columns and set index of dataframe", "return None # Use the base url to create an absolute url latest_imgurl", "elements to furthur filter # Use the parent element to find the first", "= browser.html full_img_soup = soupy(parse_html, 'html.parser' ) try: # find the relative image", "names_n_url if __name__ == \"__main__\": # if running as script, print scraped data", "datetime as dt # 
-------------------------------------------------------------------------------------------------------------------------------- # Gathered Data # -------------------------------------------------------------------------------------------------------------------------------- def scrape_all(): #", "news_soup = soupy(parse_html, 'html.parser') try: # add error handling, espescially for AttributeErros with", "= browser.links.find_by_partial_text(f'{hemis_search_list[x]}') hemi_click.click() parse_html = browser.html hemi_parse_html = soupy(parse_html, 'html.parser' ) hemi_img_url =", "to furthur filter # Use the parent element to find the first a", "news_title, news_teaser_sum # -------------------------------------------------------------------------------------------------------------------------------- # JPL Featured Space Image # -------------------------------------------------------------------------------------------------------------------------------- # Visit", ": latest_art_link, \"featured_image\" : featured_image(browser), \"facts\" : mars_facts(), \"img_and_url\": get_url(browser), \"last_modified\" : dt.datetime.now()}", "and save it as `news_title` news_title = slide_elem.find('div',class_='content_title').get_text() # news_date = slide_elem.find('div',class_='list_date').get_text() #", "\"news_paragraph_summary\" : news_teaser_sum, # \"news_latest_date\" : news_date, # \"news_latest_link\" : latest_art_link, \"featured_image\" :", "mars_news(browser) # Runs all separate scraping functions and stores results in a dictionary", "inplace=True) # set column index # Convert dataframe into HTML format, add bootstrap", "mars nasa news site nasa_url = 'https://mars.nasa.gov/news/' browser.visit(nasa_url) # optional delay for loading", "when its AttributeError with none returned slide_elem = news_soup.select_one('ul.item_list li.slide') # parent element,", "\"news_latest_link\" : latest_art_link, \"featured_image\" : featured_image(browser), \"facts\" : 
mars_facts(), \"img_and_url\": get_url(browser), \"last_modified\" :", "returned slide_elem = news_soup.select_one('ul.item_list li.slide') # parent element, holds other elements to furthur", "tag and save it as `news_title` news_title = slide_elem.find('div',class_='content_title').get_text() # news_date = slide_elem.find('div',class_='list_date').get_text()", "in a dictionary mars_total_data = { \"news_title\" : news_title, \"news_paragraph_summary\" : news_teaser_sum, #", "wait_time=2) hemi_click = browser.links.find_by_partial_text(f'{hemis_search_list[x]}') hemi_click.click() parse_html = browser.html hemi_parse_html = soupy(parse_html, 'html.parser' )", "-------------------------------------------------------------------------------------------------------------------------------- # JPL Featured Space Image # -------------------------------------------------------------------------------------------------------------------------------- # Visit URL def featured_image(browser):", "news_teaser_sum = mars_news(browser) # Runs all separate scraping functions and stores results in", "full_img_soup = soupy(parse_html, 'html.parser' ) try: # find the relative image url latest_image_full", "f\"https://mars.nasa.gov{slide_elem.select_one('ul li a').get('href')}\" # Use the parent element to find the paragraph text", "# Convert the browser html to a soup object and then quit the", "already defined outside # Visit the mars nasa news site nasa_url = 'https://mars.nasa.gov/news/'", "featured_image(browser): url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url) # Find and click the full image button", "get_url(browser): hemis_search_list = ['Cerberus Hemisphere Enhanced', 'Schiaparelli Hemisphere Enhanced', 'Syrtis Major Hemisphere Enhanced',", "names_n_url.append({Hemisphere:hemis_search_list[x],Urlid:hemi_img_url}) except IndexError: return f\"Search result not found\" except AttributeError: return None #", "as pd import datetime as dt # 
-------------------------------------------------------------------------------------------------------------------------------- # Gathered Data # --------------------------------------------------------------------------------------------------------------------------------", "# Find the more info button and click that browser.is_element_present_by_text('more info', wait_time=1) more_info_elem", "# if error, code will keep running, except it will stop when its", "News Title and Paragraph # -------------------------------------------------------------------------------------------------------------------------------- def mars_news(browser): # defined outside of the", "# Imports and Executables # -------------------------------------------------------------------------------------------------------------------------------- from splinter import Browser from bs4 import", "dictionary mars_total_data = { \"news_title\" : news_title, \"news_paragraph_summary\" : news_teaser_sum, # \"news_latest_date\" :", "None # Assign columns and set index of dataframe mars_df.columns = ['Description', 'Mars']", "exception errors return None # Assign columns and set index of dataframe mars_df.columns", "furthur filter # Use the parent element to find the first a tag", "# news_date = slide_elem.find('div',class_='list_date').get_text() # latest_art_link = f\"https://mars.nasa.gov{slide_elem.select_one('ul li a').get('href')}\" # Use the", "mars_total_data = { \"news_title\" : news_title, \"news_paragraph_summary\" : news_teaser_sum, # \"news_latest_date\" : news_date,", "basically a catalyst to get the function started, like a grandfather variable #", "-------------------------------------------------------------------------------------------------------------------------------- def get_url(browser): hemis_search_list = ['Cerberus Hemisphere Enhanced', 'Schiaparelli Hemisphere Enhanced', 'Syrtis Major", "= slide_elem.find('div',class_='article_teaser_body').get_text() except 
AttributeError: return None, None # return news_title, news_teaser_sum, news_date, latest_art_link", "variable # browser function already defined outside # Visit the mars nasa news", "column index # Convert dataframe into HTML format, add bootstrap return mars_df.to_html(classes= \"table\")", "browser.visit(url) try: browser.is_element_present_by_text((f'{hemis_search_list[x]}'), wait_time=2) hemi_click = browser.links.find_by_partial_text(f'{hemis_search_list[x]}') hemi_click.click() parse_html = browser.html hemi_parse_html =", "# -------------------------------------------------------------------------------------------------------------------------------- def mars_news(browser): # defined outside of the function, basically a catalyst", "# optional delay for loading page browser.is_element_present_by_css(\"ul.item_list li.slide\", wait_time=1) # Convert the browser", "= soupy(parse_html, 'html.parser' ) hemi_img_url = hemi_parse_html.select_one('ul li a').get(\"href\") names_n_url.append({Hemisphere:hemis_search_list[x],Urlid:hemi_img_url}) except IndexError: return", "Parse the resulting html with soup parse_html = browser.html full_img_soup = soupy(parse_html, 'html.parser'", "# JPL Featured Space Image # -------------------------------------------------------------------------------------------------------------------------------- # Visit URL def featured_image(browser): url", "hemi_click = browser.links.find_by_partial_text(f'{hemis_search_list[x]}') hemi_click.click() parse_html = browser.html hemi_parse_html = soupy(parse_html, 'html.parser' ) hemi_img_url", "names mars_df.set_index('Description', inplace=True) # set column index # Convert dataframe into HTML format,", "**{'executable_path':'chromedriver'}, headless=True) # headless = True, doesnt show automated script in action #", "started, like a grandfather variable # browser function already defined outside # Visit", "save it as `news_title` news_title = 
slide_elem.find('div',class_='content_title').get_text() # news_date = slide_elem.find('div',class_='list_date').get_text() # latest_art_link", "error handling, espescially for AttributeErros with try/except # if error, code will keep", "get_url(browser), \"last_modified\" : dt.datetime.now()} browser.quit() return mars_total_data # -------------------------------------------------------------------------------------------------------------------------------- # News Title and", "initialize the chrome browser in splinter browser = Browser('chrome', **{'executable_path':'chromedriver'}, headless=True) # headless", "a dictionary mars_total_data = { \"news_title\" : news_title, \"news_paragraph_summary\" : news_teaser_sum, # \"news_latest_date\"", "info button and click that browser.is_element_present_by_text('more info', wait_time=1) more_info_elem = browser.links.find_by_partial_text('more info') more_info_elem.click()", "news_teaser_sum, news_date = mars_news(browser) news_title, news_teaser_sum = mars_news(browser) # Runs all separate scraping", "browser.links.find_by_partial_text(f'{hemis_search_list[x]}') hemi_click.click() parse_html = browser.html hemi_parse_html = soupy(parse_html, 'html.parser' ) hemi_img_url = hemi_parse_html.select_one('ul", "a soup object and then quit the browser parse_html = browser.html news_soup =", ") try: # find the relative image url latest_image_full = full_img_soup.select_one('figure.lede a img').get(\"src\")", "other elements to furthur filter # Use the parent element to find the", "# Use the parent element to find the paragraph text news_teaser_sum = slide_elem.find('div',class_='article_teaser_body').get_text()", "= ['Description', 'Mars'] # adds column names mars_df.set_index('Description', inplace=True) # set column index", "click the full image button full_image_elem = browser.find_by_id('full_image') full_image_elem.click() # Find the more", "latest_imgurl # 
-------------------------------------------------------------------------------------------------------------------------------- # Mars Fact Table # -------------------------------------------------------------------------------------------------------------------------------- def mars_facts(): try: mars_df", "news_date = slide_elem.find('div',class_='list_date').get_text() # latest_art_link = f\"https://mars.nasa.gov{slide_elem.select_one('ul li a').get('href')}\" # Use the parent", ": news_date, # \"news_latest_link\" : latest_art_link, \"featured_image\" : featured_image(browser), \"facts\" : mars_facts(), \"img_and_url\":", "# -------------------------------------------------------------------------------------------------------------------------------- # Visit URL def featured_image(browser): url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url) # Find", "news_title, news_teaser_sum = mars_news(browser) # Runs all separate scraping functions and stores results", "a grandfather variable # browser function already defined outside # Visit the mars", "# \"news_latest_link\" : latest_art_link, \"featured_image\" : featured_image(browser), \"facts\" : mars_facts(), \"img_and_url\": get_url(browser), \"last_modified\"", "the paragraph text news_teaser_sum = slide_elem.find('div',class_='article_teaser_body').get_text() except AttributeError: return None, None # return", "with try/except # if error, code will keep running, except it will stop", "from splinter import Browser from bs4 import BeautifulSoup as soupy import pandas as", "will stop when its AttributeError with none returned slide_elem = news_soup.select_one('ul.item_list li.slide') #", "parent element, holds other elements to furthur filter # Use the parent element", "full_img_soup.select_one('figure.lede a img').get(\"src\") except AttributeError: return None # Use the base url to", "= soupy(parse_html, 'html.parser' ) try: # find the relative image url latest_image_full =", 
"an absolute url latest_imgurl = f\"https://www.jpl.nasa.gov{latest_image_full}\" return latest_imgurl # -------------------------------------------------------------------------------------------------------------------------------- # Mars Fact", "parse_html = browser.html full_img_soup = soupy(parse_html, 'html.parser' ) try: # find the relative", "Mars Fact Table # -------------------------------------------------------------------------------------------------------------------------------- def mars_facts(): try: mars_df = pd.read_html('https://space-facts.com/mars/')[0] except BaseException:", "Data # -------------------------------------------------------------------------------------------------------------------------------- def scrape_all(): # Set the executable path and initialize the", "as soupy import pandas as pd import datetime as dt # -------------------------------------------------------------------------------------------------------------------------------- #", "Imports and Executables # -------------------------------------------------------------------------------------------------------------------------------- from splinter import Browser from bs4 import BeautifulSoup", "BaseException: # covers all exception errors return None # Assign columns and set", "# pd.set_option('display.max_colwidth', -1) return names_n_url if __name__ == \"__main__\": # if running as", "Hemisphere Enhanced'] names_n_url = [] Hemisphere = \"Hemisphere\" Urlid = \"URL\" for x", "except AttributeError: return None # df_hemi_urls = pd.DataFrame.from_dict(names_n_url, orient='columns') # df_hemi_urls.set_index('Hemisphere', inplace=True) #", "for loading page browser.is_element_present_by_css(\"ul.item_list li.slide\", wait_time=1) # Convert the browser html to a", "browser.visit(nasa_url) # optional delay for loading page browser.is_element_present_by_css(\"ul.item_list li.slide\", wait_time=1) # Convert the", "the base url to create an absolute url latest_imgurl = 
f\"https://www.jpl.nasa.gov{latest_image_full}\" return latest_imgurl", "True, doesnt show automated script in action # pylint: disable=unbalanced-tuple-unpacking # news_title, news_teaser_sum,", ") hemi_img_url = hemi_parse_html.select_one('ul li a').get(\"href\") names_n_url.append({Hemisphere:hemis_search_list[x],Urlid:hemi_img_url}) except IndexError: return f\"Search result not", "except IndexError: return f\"Search result not found\" except AttributeError: return None # df_hemi_urls", "# Runs all separate scraping functions and stores results in a dictionary mars_total_data", "errors return None # Assign columns and set index of dataframe mars_df.columns =", "and click the full image button full_image_elem = browser.find_by_id('full_image') full_image_elem.click() # Find the", "Image # -------------------------------------------------------------------------------------------------------------------------------- # Visit URL def featured_image(browser): url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url) #", "adds column names mars_df.set_index('Description', inplace=True) # set column index # Convert dataframe into", "\"table\") # -------------------------------------------------------------------------------------------------------------------------------- # Mars Hemispheres # -------------------------------------------------------------------------------------------------------------------------------- def get_url(browser): hemis_search_list = ['Cerberus", "news_date = mars_news(browser) news_title, news_teaser_sum = mars_news(browser) # Runs all separate scraping functions", "= slide_elem.find('div',class_='list_date').get_text() # latest_art_link = f\"https://mars.nasa.gov{slide_elem.select_one('ul li a').get('href')}\" # Use the parent element", "to find the first a tag and save it as `news_title` news_title =", "df_hemi_urls['URL']=str(df_hemi_urls['URL']) # pd.set_option('display.max_colwidth', -1) return names_n_url 
if __name__ == \"__main__\": # if running", "paragraph text news_teaser_sum = slide_elem.find('div',class_='article_teaser_body').get_text() except AttributeError: return None, None # return news_title,", "-------------------------------------------------------------------------------------------------------------------------------- def scrape_all(): # Set the executable path and initialize the chrome browser", ": featured_image(browser), \"facts\" : mars_facts(), \"img_and_url\": get_url(browser), \"last_modified\" : dt.datetime.now()} browser.quit() return mars_total_data", "= browser.html news_soup = soupy(parse_html, 'html.parser') try: # add error handling, espescially for", "# -------------------------------------------------------------------------------------------------------------------------------- # News Title and Paragraph # -------------------------------------------------------------------------------------------------------------------------------- def mars_news(browser): # defined", "url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url) # Find and click the full image button full_image_elem", "# Assign columns and set index of dataframe mars_df.columns = ['Description', 'Mars'] #", "the browser html to a soup object and then quit the browser parse_html", "find the paragraph text news_teaser_sum = slide_elem.find('div',class_='article_teaser_body').get_text() except AttributeError: return None, None #", "wait_time=1) more_info_elem = browser.links.find_by_partial_text('more info') more_info_elem.click() # Parse the resulting html with soup", "img').get(\"src\") except AttributeError: return None # Use the base url to create an", "scrape_all(): # Set the executable path and initialize the chrome browser in splinter", "news_title, \"news_paragraph_summary\" : news_teaser_sum, # \"news_latest_date\" : news_date, # \"news_latest_link\" : latest_art_link, \"featured_image\"", "browser.is_element_present_by_text('more 
info', wait_time=1) more_info_elem = browser.links.find_by_partial_text('more info') more_info_elem.click() # Parse the resulting html", "function started, like a grandfather variable # browser function already defined outside #", "AttributeErros with try/except # if error, code will keep running, except it will", "error, code will keep running, except it will stop when its AttributeError with", "the relative image url latest_image_full = full_img_soup.select_one('figure.lede a img').get(\"src\") except AttributeError: return None", "JPL Featured Space Image # -------------------------------------------------------------------------------------------------------------------------------- # Visit URL def featured_image(browser): url =", "soup object and then quit the browser parse_html = browser.html news_soup = soupy(parse_html,", "try: browser.is_element_present_by_text((f'{hemis_search_list[x]}'), wait_time=2) hemi_click = browser.links.find_by_partial_text(f'{hemis_search_list[x]}') hemi_click.click() parse_html = browser.html hemi_parse_html = soupy(parse_html,", "relative image url latest_image_full = full_img_soup.select_one('figure.lede a img').get(\"src\") except AttributeError: return None #", "Enhanced', 'Valles Marineris Hemisphere Enhanced'] names_n_url = [] Hemisphere = \"Hemisphere\" Urlid =", "'html.parser' ) try: # find the relative image url latest_image_full = full_img_soup.select_one('figure.lede a", "-------------------------------------------------------------------------------------------------------------------------------- # Visit URL def featured_image(browser): url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url) # Find and", "'Syrtis Major Hemisphere Enhanced', 'Valles Marineris Hemisphere Enhanced'] names_n_url = [] Hemisphere =", "Find the more info button and click that browser.is_element_present_by_text('more info', wait_time=1) more_info_elem =", "as `news_title` news_title = 
slide_elem.find('div',class_='content_title').get_text() # news_date = slide_elem.find('div',class_='list_date').get_text() # latest_art_link = f\"https://mars.nasa.gov{slide_elem.select_one('ul", "mars_total_data # -------------------------------------------------------------------------------------------------------------------------------- # News Title and Paragraph # -------------------------------------------------------------------------------------------------------------------------------- def mars_news(browser): #", "def mars_news(browser): # defined outside of the function, basically a catalyst to get", "hemi_parse_html = soupy(parse_html, 'html.parser' ) hemi_img_url = hemi_parse_html.select_one('ul li a').get(\"href\") names_n_url.append({Hemisphere:hemis_search_list[x],Urlid:hemi_img_url}) except IndexError:", "functions and stores results in a dictionary mars_total_data = { \"news_title\" : news_title,", "script in action # pylint: disable=unbalanced-tuple-unpacking # news_title, news_teaser_sum, news_date = mars_news(browser) news_title,", "the first a tag and save it as `news_title` news_title = slide_elem.find('div',class_='content_title').get_text() #", "Hemisphere Enhanced', 'Valles Marineris Hemisphere Enhanced'] names_n_url = [] Hemisphere = \"Hemisphere\" Urlid", "hemi_click.click() parse_html = browser.html hemi_parse_html = soupy(parse_html, 'html.parser' ) hemi_img_url = hemi_parse_html.select_one('ul li", "the parent element to find the first a tag and save it as", "Visit URL def featured_image(browser): url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url) # Find and click the", "set index of dataframe mars_df.columns = ['Description', 'Mars'] # adds column names mars_df.set_index('Description',", "AttributeError: return None # df_hemi_urls = pd.DataFrame.from_dict(names_n_url, orient='columns') # df_hemi_urls.set_index('Hemisphere', inplace=True) # df_hemi_urls['URL']=str(df_hemi_urls['URL'])", "# 
\"news_latest_date\" : news_date, # \"news_latest_link\" : latest_art_link, \"featured_image\" : featured_image(browser), \"facts\" :", "def scrape_all(): # Set the executable path and initialize the chrome browser in", "it as `news_title` news_title = slide_elem.find('div',class_='content_title').get_text() # news_date = slide_elem.find('div',class_='list_date').get_text() # latest_art_link =", "espescially for AttributeErros with try/except # if error, code will keep running, except", "-------------------------------------------------------------------------------------------------------------------------------- # Mars Fact Table # -------------------------------------------------------------------------------------------------------------------------------- def mars_facts(): try: mars_df = pd.read_html('https://space-facts.com/mars/')[0]", "create an absolute url latest_imgurl = f\"https://www.jpl.nasa.gov{latest_image_full}\" return latest_imgurl # -------------------------------------------------------------------------------------------------------------------------------- # Mars", "-------------------------------------------------------------------------------------------------------------------------------- # Gathered Data # -------------------------------------------------------------------------------------------------------------------------------- def scrape_all(): # Set the executable path", "Runs all separate scraping functions and stores results in a dictionary mars_total_data =", "# news_title, news_teaser_sum, news_date = mars_news(browser) news_title, news_teaser_sum = mars_news(browser) # Runs all", "Mars Hemispheres # -------------------------------------------------------------------------------------------------------------------------------- def get_url(browser): hemis_search_list = ['Cerberus Hemisphere Enhanced', 'Schiaparelli Hemisphere", "range(len(hemis_search_list)): url = 
'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' browser.visit(url) try: browser.is_element_present_by_text((f'{hemis_search_list[x]}'), wait_time=2) hemi_click = browser.links.find_by_partial_text(f'{hemis_search_list[x]}') hemi_click.click() parse_html", "soup parse_html = browser.html full_img_soup = soupy(parse_html, 'html.parser' ) try: # find the", "set column index # Convert dataframe into HTML format, add bootstrap return mars_df.to_html(classes=", "Enhanced'] names_n_url = [] Hemisphere = \"Hemisphere\" Urlid = \"URL\" for x in", "Table # -------------------------------------------------------------------------------------------------------------------------------- def mars_facts(): try: mars_df = pd.read_html('https://space-facts.com/mars/')[0] except BaseException: # covers", "latest_image_full = full_img_soup.select_one('figure.lede a img').get(\"src\") except AttributeError: return None # Use the base", "absolute url latest_imgurl = f\"https://www.jpl.nasa.gov{latest_image_full}\" return latest_imgurl # -------------------------------------------------------------------------------------------------------------------------------- # Mars Fact Table", "chrome browser in splinter browser = Browser('chrome', **{'executable_path':'chromedriver'}, headless=True) # headless = True,", "columns and set index of dataframe mars_df.columns = ['Description', 'Mars'] # adds column", "in range(len(hemis_search_list)): url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' browser.visit(url) try: browser.is_element_present_by_text((f'{hemis_search_list[x]}'), wait_time=2) hemi_click = browser.links.find_by_partial_text(f'{hemis_search_list[x]}') hemi_click.click()", "-1) return names_n_url if __name__ == \"__main__\": # if running as script, print", "stop when its AttributeError with none returned slide_elem = news_soup.select_one('ul.item_list li.slide') # parent", "return mars_total_data # 
-------------------------------------------------------------------------------------------------------------------------------- # News Title and Paragraph # -------------------------------------------------------------------------------------------------------------------------------- def mars_news(browser):", "Find and click the full image button full_image_elem = browser.find_by_id('full_image') full_image_elem.click() # Find", "AttributeError: return None, None # return news_title, news_teaser_sum, news_date, latest_art_link return news_title, news_teaser_sum", "text news_teaser_sum = slide_elem.find('div',class_='article_teaser_body').get_text() except AttributeError: return None, None # return news_title, news_teaser_sum,", "page browser.is_element_present_by_css(\"ul.item_list li.slide\", wait_time=1) # Convert the browser html to a soup object", "= 'https://mars.nasa.gov/news/' browser.visit(nasa_url) # optional delay for loading page browser.is_element_present_by_css(\"ul.item_list li.slide\", wait_time=1) #", "news_teaser_sum, # \"news_latest_date\" : news_date, # \"news_latest_link\" : latest_art_link, \"featured_image\" : featured_image(browser), \"facts\"", "info') more_info_elem.click() # Parse the resulting html with soup parse_html = browser.html full_img_soup", "= pd.read_html('https://space-facts.com/mars/')[0] except BaseException: # covers all exception errors return None # Assign", "= mars_news(browser) # Runs all separate scraping functions and stores results in a", "add bootstrap return mars_df.to_html(classes= \"table\") # -------------------------------------------------------------------------------------------------------------------------------- # Mars Hemispheres # -------------------------------------------------------------------------------------------------------------------------------- def", "-------------------------------------------------------------------------------------------------------------------------------- from 
splinter import Browser from bs4 import BeautifulSoup as soupy import pandas", "AttributeError with none returned slide_elem = news_soup.select_one('ul.item_list li.slide') # parent element, holds other", "and Executables # -------------------------------------------------------------------------------------------------------------------------------- from splinter import Browser from bs4 import BeautifulSoup as", "disable=unbalanced-tuple-unpacking # news_title, news_teaser_sum, news_date = mars_news(browser) news_title, news_teaser_sum = mars_news(browser) # Runs", "= mars_news(browser) news_title, news_teaser_sum = mars_news(browser) # Runs all separate scraping functions and", "'https://mars.nasa.gov/news/' browser.visit(nasa_url) # optional delay for loading page browser.is_element_present_by_css(\"ul.item_list li.slide\", wait_time=1) # Convert", "browser.links.find_by_partial_text('more info') more_info_elem.click() # Parse the resulting html with soup parse_html = browser.html", "with soup parse_html = browser.html full_img_soup = soupy(parse_html, 'html.parser' ) try: # find", "url latest_image_full = full_img_soup.select_one('figure.lede a img').get(\"src\") except AttributeError: return None # Use the", "dataframe mars_df.columns = ['Description', 'Mars'] # adds column names mars_df.set_index('Description', inplace=True) # set", "executable path and initialize the chrome browser in splinter browser = Browser('chrome', **{'executable_path':'chromedriver'},", "html to a soup object and then quit the browser parse_html = browser.html", "slide_elem.find('div',class_='content_title').get_text() # news_date = slide_elem.find('div',class_='list_date').get_text() # latest_art_link = f\"https://mars.nasa.gov{slide_elem.select_one('ul li a').get('href')}\" # Use", "# Gathered Data # -------------------------------------------------------------------------------------------------------------------------------- def scrape_all(): # Set the executable path and", 
"button full_image_elem = browser.find_by_id('full_image') full_image_elem.click() # Find the more info button and click", "to find the paragraph text news_teaser_sum = slide_elem.find('div',class_='article_teaser_body').get_text() except AttributeError: return None, None", "the function started, like a grandfather variable # browser function already defined outside", "# parent element, holds other elements to furthur filter # Use the parent", "Visit the mars nasa news site nasa_url = 'https://mars.nasa.gov/news/' browser.visit(nasa_url) # optional delay", "Fact Table # -------------------------------------------------------------------------------------------------------------------------------- def mars_facts(): try: mars_df = pd.read_html('https://space-facts.com/mars/')[0] except BaseException: #", "running, except it will stop when its AttributeError with none returned slide_elem =", "full image button full_image_elem = browser.find_by_id('full_image') full_image_elem.click() # Find the more info button", "browser.find_by_id('full_image') full_image_elem.click() # Find the more info button and click that browser.is_element_present_by_text('more info',", "a img').get(\"src\") except AttributeError: return None # Use the base url to create", "= 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' browser.visit(url) try: browser.is_element_present_by_text((f'{hemis_search_list[x]}'), wait_time=2) hemi_click = browser.links.find_by_partial_text(f'{hemis_search_list[x]}') hemi_click.click() parse_html = browser.html", "headless = True, doesnt show automated script in action # pylint: disable=unbalanced-tuple-unpacking #", "browser parse_html = browser.html news_soup = soupy(parse_html, 'html.parser') try: # add error handling,", "# add error handling, espescially for AttributeErros with try/except # if error, code", "# 
-------------------------------------------------------------------------------------------------------------------------------- def get_url(browser): hemis_search_list = ['Cerberus Hemisphere Enhanced', 'Schiaparelli Hemisphere Enhanced', 'Syrtis", "delay for loading page browser.is_element_present_by_css(\"ul.item_list li.slide\", wait_time=1) # Convert the browser html to", "the more info button and click that browser.is_element_present_by_text('more info', wait_time=1) more_info_elem = browser.links.find_by_partial_text('more", "code will keep running, except it will stop when its AttributeError with none", "# Parse the resulting html with soup parse_html = browser.html full_img_soup = soupy(parse_html,", "Hemisphere Enhanced', 'Syrtis Major Hemisphere Enhanced', 'Valles Marineris Hemisphere Enhanced'] names_n_url = []", "splinter browser = Browser('chrome', **{'executable_path':'chromedriver'}, headless=True) # headless = True, doesnt show automated", "news_title = slide_elem.find('div',class_='content_title').get_text() # news_date = slide_elem.find('div',class_='list_date').get_text() # latest_art_link = f\"https://mars.nasa.gov{slide_elem.select_one('ul li a').get('href')}\"", "automated script in action # pylint: disable=unbalanced-tuple-unpacking # news_title, news_teaser_sum, news_date = mars_news(browser)", "news_title, news_teaser_sum, news_date = mars_news(browser) news_title, news_teaser_sum = mars_news(browser) # Runs all separate", "'html.parser' ) hemi_img_url = hemi_parse_html.select_one('ul li a').get(\"href\") names_n_url.append({Hemisphere:hemis_search_list[x],Urlid:hemi_img_url}) except IndexError: return f\"Search result", "more_info_elem = browser.links.find_by_partial_text('more info') more_info_elem.click() # Parse the resulting html with soup parse_html", "return None # df_hemi_urls = pd.DataFrame.from_dict(names_n_url, orient='columns') # df_hemi_urls.set_index('Hemisphere', inplace=True) # df_hemi_urls['URL']=str(df_hemi_urls['URL']) 
#", "into HTML format, add bootstrap return mars_df.to_html(classes= \"table\") # -------------------------------------------------------------------------------------------------------------------------------- # Mars Hemispheres", "return news_title, news_teaser_sum # -------------------------------------------------------------------------------------------------------------------------------- # JPL Featured Space Image # -------------------------------------------------------------------------------------------------------------------------------- #", "bs4 import BeautifulSoup as soupy import pandas as pd import datetime as dt", "news_teaser_sum, news_date, latest_art_link return news_title, news_teaser_sum # -------------------------------------------------------------------------------------------------------------------------------- # JPL Featured Space Image", "to a soup object and then quit the browser parse_html = browser.html news_soup", "soupy(parse_html, 'html.parser') try: # add error handling, espescially for AttributeErros with try/except #", "news site nasa_url = 'https://mars.nasa.gov/news/' browser.visit(nasa_url) # optional delay for loading page browser.is_element_present_by_css(\"ul.item_list", "return None, None # return news_title, news_teaser_sum, news_date, latest_art_link return news_title, news_teaser_sum #", "None # df_hemi_urls = pd.DataFrame.from_dict(names_n_url, orient='columns') # df_hemi_urls.set_index('Hemisphere', inplace=True) # df_hemi_urls['URL']=str(df_hemi_urls['URL']) # pd.set_option('display.max_colwidth',", "except it will stop when its AttributeError with none returned slide_elem = news_soup.select_one('ul.item_list", "def featured_image(browser): url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url) # Find and click the full image", "\"img_and_url\": get_url(browser), \"last_modified\" : dt.datetime.now()} browser.quit() return mars_total_data # 
-------------------------------------------------------------------------------------------------------------------------------- # News Title", "['Cerberus Hemisphere Enhanced', 'Schiaparelli Hemisphere Enhanced', 'Syrtis Major Hemisphere Enhanced', 'Valles Marineris Hemisphere", "of dataframe mars_df.columns = ['Description', 'Mars'] # adds column names mars_df.set_index('Description', inplace=True) #", "= soupy(parse_html, 'html.parser') try: # add error handling, espescially for AttributeErros with try/except", "parse_html = browser.html hemi_parse_html = soupy(parse_html, 'html.parser' ) hemi_img_url = hemi_parse_html.select_one('ul li a').get(\"href\")", "# Find and click the full image button full_image_elem = browser.find_by_id('full_image') full_image_elem.click() #", "Enhanced', 'Schiaparelli Hemisphere Enhanced', 'Syrtis Major Hemisphere Enhanced', 'Valles Marineris Hemisphere Enhanced'] names_n_url", "# -------------------------------------------------------------------------------------------------------------------------------- # Mars Hemispheres # -------------------------------------------------------------------------------------------------------------------------------- def get_url(browser): hemis_search_list = ['Cerberus Hemisphere", "# Mars Fact Table # -------------------------------------------------------------------------------------------------------------------------------- def mars_facts(): try: mars_df = pd.read_html('https://space-facts.com/mars/')[0] except", "try/except # if error, code will keep running, except it will stop when", "browser.is_element_present_by_css(\"ul.item_list li.slide\", wait_time=1) # Convert the browser html to a soup object and", "mars_facts(): try: mars_df = pd.read_html('https://space-facts.com/mars/')[0] except BaseException: # covers all exception errors return", "browser function already defined outside # Visit the mars nasa news site nasa_url", "base url to create an absolute url latest_imgurl = 
f\"https://www.jpl.nasa.gov{latest_image_full}\" return latest_imgurl #", "latest_art_link = f\"https://mars.nasa.gov{slide_elem.select_one('ul li a').get('href')}\" # Use the parent element to find the", "slide_elem.find('div',class_='list_date').get_text() # latest_art_link = f\"https://mars.nasa.gov{slide_elem.select_one('ul li a').get('href')}\" # Use the parent element to", "orient='columns') # df_hemi_urls.set_index('Hemisphere', inplace=True) # df_hemi_urls['URL']=str(df_hemi_urls['URL']) # pd.set_option('display.max_colwidth', -1) return names_n_url if __name__", "that browser.is_element_present_by_text('more info', wait_time=1) more_info_elem = browser.links.find_by_partial_text('more info') more_info_elem.click() # Parse the resulting", "mars_facts(), \"img_and_url\": get_url(browser), \"last_modified\" : dt.datetime.now()} browser.quit() return mars_total_data # -------------------------------------------------------------------------------------------------------------------------------- # News", "= [] Hemisphere = \"Hemisphere\" Urlid = \"URL\" for x in range(len(hemis_search_list)): url", "stores results in a dictionary mars_total_data = { \"news_title\" : news_title, \"news_paragraph_summary\" :", "element, holds other elements to furthur filter # Use the parent element to", "except AttributeError: return None # Use the base url to create an absolute", "-------------------------------------------------------------------------------------------------------------------------------- # Imports and Executables # -------------------------------------------------------------------------------------------------------------------------------- from splinter import Browser from bs4", "site nasa_url = 'https://mars.nasa.gov/news/' browser.visit(nasa_url) # optional delay for loading page browser.is_element_present_by_css(\"ul.item_list li.slide\",", "get the function started, like a grandfather variable # browser function already defined", "Executables # 
-------------------------------------------------------------------------------------------------------------------------------- from splinter import Browser from bs4 import BeautifulSoup as soupy", "Space Image # -------------------------------------------------------------------------------------------------------------------------------- # Visit URL def featured_image(browser): url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(url)", "outside of the function, basically a catalyst to get the function started, like", "the mars nasa news site nasa_url = 'https://mars.nasa.gov/news/' browser.visit(nasa_url) # optional delay for", "mars_news(browser) news_title, news_teaser_sum = mars_news(browser) # Runs all separate scraping functions and stores", "all separate scraping functions and stores results in a dictionary mars_total_data = {", "hemi_parse_html.select_one('ul li a').get(\"href\") names_n_url.append({Hemisphere:hemis_search_list[x],Urlid:hemi_img_url}) except IndexError: return f\"Search result not found\" except AttributeError:", "IndexError: return f\"Search result not found\" except AttributeError: return None # df_hemi_urls =", "return f\"Search result not found\" except AttributeError: return None # df_hemi_urls = pd.DataFrame.from_dict(names_n_url,", "from bs4 import BeautifulSoup as soupy import pandas as pd import datetime as", "li a').get(\"href\") names_n_url.append({Hemisphere:hemis_search_list[x],Urlid:hemi_img_url}) except IndexError: return f\"Search result not found\" except AttributeError: return" ]
[ "Maior [ 4 ] Novos Números [ 5 ] Sair do programa''') opcao", "[ 1 ] Somar [ 2 ] Multiplicar [ 3 ] Maior [", "entre {} + {} = {}!'.format(n1, n2, soma)) elif opcao == 2: produto", "{}!'.format(n1, n2, produto)) elif opcao == 3: if n1 > n2: maior =", "maior = n1 else: maior = n2 print('Entre os números {} e {}", "] Multiplicar [ 3 ] Maior [ 4 ] Novos Números [ 5", "5 ] Sair do programa''') opcao = int(input(\"Qual é a sua opção: \"))", "= n2 print('Entre os números {} e {} o maior é {}'.format(n1, n2,", "opcao = int(input(\"Qual é a sua opção: \")) if opcao == 1: soma", "5: print('Finalizando...') else: print('Opção inválida, tente outro número!') print('-=-' * 20) sleep(2) print('FIM", "5: print(''' [ 1 ] Somar [ 2 ] Multiplicar [ 3 ]", "= {}!'.format(n1, n2, produto)) elif opcao == 3: if n1 > n2: maior", "] Somar [ 2 ] Multiplicar [ 3 ] Maior [ 4 ]", "sleep n1 = int(input('Primeiro Valor: ')) n2 = int(input('Segundo valor: ')) opcao =", "2: produto = n2 * n1 print('O produto entre {} * {} =", "{} * {} = {}!'.format(n1, n2, produto)) elif opcao == 3: if n1", "!= 5: print(''' [ 1 ] Somar [ 2 ] Multiplicar [ 3", "import sleep n1 = int(input('Primeiro Valor: ')) n2 = int(input('Segundo valor: ')) opcao", "from time import sleep n1 = int(input('Primeiro Valor: ')) n2 = int(input('Segundo valor:", "= int(input(\"Qual é a sua opção: \")) if opcao == 1: soma =", "opcao == 1: soma = n1 + n2 print('A soma entre {} +", "maior = n2 print('Entre os números {} e {} o maior é {}'.format(n1,", "else: maior = n2 print('Entre os números {} e {} o maior é", "os números {} e {} o maior é {}'.format(n1, n2, maior)) elif opcao", "print('O produto entre {} * {} = {}!'.format(n1, n2, produto)) elif opcao ==", "== 5: print('Finalizando...') else: print('Opção inválida, tente outro número!') print('-=-' * 20) sleep(2)", "Sair do programa''') opcao = int(input(\"Qual é a sua opção: \")) if opcao", "')) n2 = int(input('Segundo valor: ')) opcao = 0 while opcao != 5:", "print('Informe 
novamento os números!!') n1 = int(input('Primeiro Valor: ')) n2 = int(input('Segundo valor:", "valor: ')) opcao = 0 while opcao != 5: print(''' [ 1 ]", "n2 print('A soma entre {} + {} = {}!'.format(n1, n2, soma)) elif opcao", "maior é {}'.format(n1, n2, maior)) elif opcao == 4: print('Informe novamento os números!!')", "= {}!'.format(n1, n2, soma)) elif opcao == 2: produto = n2 * n1", "opcao == 4: print('Informe novamento os números!!') n1 = int(input('Primeiro Valor: ')) n2", "elif opcao == 3: if n1 > n2: maior = n1 else: maior", "[ 4 ] Novos Números [ 5 ] Sair do programa''') opcao =", "print(''' [ 1 ] Somar [ 2 ] Multiplicar [ 3 ] Maior", "entre {} * {} = {}!'.format(n1, n2, produto)) elif opcao == 3: if", "else: print('Opção inválida, tente outro número!') print('-=-' * 20) sleep(2) print('FIM DO PROGAMA')", "= 0 while opcao != 5: print(''' [ 1 ] Somar [ 2", "{} + {} = {}!'.format(n1, n2, soma)) elif opcao == 2: produto =", "Valor: ')) n2 = int(input('Segundo valor: ')) opcao = 0 while opcao !=", "é a sua opção: \")) if opcao == 1: soma = n1 +", "[ 2 ] Multiplicar [ 3 ] Maior [ 4 ] Novos Números", "Multiplicar [ 3 ] Maior [ 4 ] Novos Números [ 5 ]", "[ 3 ] Maior [ 4 ] Novos Números [ 5 ] Sair", "= int(input('Segundo valor: ')) elif opcao == 5: print('Finalizando...') else: print('Opção inválida, tente", "elif opcao == 5: print('Finalizando...') else: print('Opção inválida, tente outro número!') print('-=-' *", "] Novos Números [ 5 ] Sair do programa''') opcao = int(input(\"Qual é", "{} = {}!'.format(n1, n2, soma)) elif opcao == 2: produto = n2 *", "')) n2 = int(input('Segundo valor: ')) elif opcao == 5: print('Finalizando...') else: print('Opção", "n2, soma)) elif opcao == 2: produto = n2 * n1 print('O produto", "2 ] Multiplicar [ 3 ] Maior [ 4 ] Novos Números [", "3 ] Maior [ 4 ] Novos Números [ 5 ] Sair do", "3: if n1 > n2: maior = n1 else: maior = n2 print('Entre", "números {} e {} o maior é {}'.format(n1, n2, maior)) elif opcao ==", "soma)) elif opcao == 2: 
produto = n2 * n1 print('O produto entre", "+ n2 print('A soma entre {} + {} = {}!'.format(n1, n2, soma)) elif", "números!!') n1 = int(input('Primeiro Valor: ')) n2 = int(input('Segundo valor: ')) elif opcao", "\")) if opcao == 1: soma = n1 + n2 print('A soma entre", "1: soma = n1 + n2 print('A soma entre {} + {} =", "n1 = int(input('Primeiro Valor: ')) n2 = int(input('Segundo valor: ')) opcao = 0", "n1 + n2 print('A soma entre {} + {} = {}!'.format(n1, n2, soma))", "= int(input('Segundo valor: ')) opcao = 0 while opcao != 5: print(''' [", "14/ex059P.py<gh_stars>0 from time import sleep n1 = int(input('Primeiro Valor: ')) n2 = int(input('Segundo", "{}!'.format(n1, n2, soma)) elif opcao == 2: produto = n2 * n1 print('O", "print('A soma entre {} + {} = {}!'.format(n1, n2, soma)) elif opcao ==", "produto entre {} * {} = {}!'.format(n1, n2, produto)) elif opcao == 3:", "n2 print('Entre os números {} e {} o maior é {}'.format(n1, n2, maior))", "Valor: ')) n2 = int(input('Segundo valor: ')) elif opcao == 5: print('Finalizando...') else:", "* {} = {}!'.format(n1, n2, produto)) elif opcao == 3: if n1 >", "{}'.format(n1, n2, maior)) elif opcao == 4: print('Informe novamento os números!!') n1 =", "while opcao != 5: print(''' [ 1 ] Somar [ 2 ] Multiplicar", "n1 = int(input('Primeiro Valor: ')) n2 = int(input('Segundo valor: ')) elif opcao ==", "maior)) elif opcao == 4: print('Informe novamento os números!!') n1 = int(input('Primeiro Valor:", "opcao == 2: produto = n2 * n1 print('O produto entre {} *", "n2 = int(input('Segundo valor: ')) opcao = 0 while opcao != 5: print('''", "> n2: maior = n1 else: maior = n2 print('Entre os números {}", "0 while opcao != 5: print(''' [ 1 ] Somar [ 2 ]", "n2 * n1 print('O produto entre {} * {} = {}!'.format(n1, n2, produto))", "')) opcao = 0 while opcao != 5: print(''' [ 1 ] Somar", "int(input('Segundo valor: ')) opcao = 0 while opcao != 5: print(''' [ 1", "4 ] Novos Números [ 5 ] Sair do programa''') opcao = int(input(\"Qual", "e {} o maior 
é {}'.format(n1, n2, maior)) elif opcao == 4: print('Informe", "int(input('Primeiro Valor: ')) n2 = int(input('Segundo valor: ')) opcao = 0 while opcao", "== 4: print('Informe novamento os números!!') n1 = int(input('Primeiro Valor: ')) n2 =", "produto = n2 * n1 print('O produto entre {} * {} = {}!'.format(n1,", "n2, produto)) elif opcao == 3: if n1 > n2: maior = n1", "= n1 else: maior = n2 print('Entre os números {} e {} o", "Novos Números [ 5 ] Sair do programa''') opcao = int(input(\"Qual é a", "sua opção: \")) if opcao == 1: soma = n1 + n2 print('A", "opcao = 0 while opcao != 5: print(''' [ 1 ] Somar [", "4: print('Informe novamento os números!!') n1 = int(input('Primeiro Valor: ')) n2 = int(input('Segundo", "n2: maior = n1 else: maior = n2 print('Entre os números {} e", "{} o maior é {}'.format(n1, n2, maior)) elif opcao == 4: print('Informe novamento", "+ {} = {}!'.format(n1, n2, soma)) elif opcao == 2: produto = n2", "opção: \")) if opcao == 1: soma = n1 + n2 print('A soma", "= int(input('Primeiro Valor: ')) n2 = int(input('Segundo valor: ')) opcao = 0 while", "== 3: if n1 > n2: maior = n1 else: maior = n2", "n1 else: maior = n2 print('Entre os números {} e {} o maior", "Somar [ 2 ] Multiplicar [ 3 ] Maior [ 4 ] Novos", "<reponame>alaanlimaa/Python_CVM1-2-3<filename>Aula 14/ex059P.py<gh_stars>0 from time import sleep n1 = int(input('Primeiro Valor: ')) n2 =", "1 ] Somar [ 2 ] Multiplicar [ 3 ] Maior [ 4", "time import sleep n1 = int(input('Primeiro Valor: ')) n2 = int(input('Segundo valor: '))", "== 2: produto = n2 * n1 print('O produto entre {} * {}", "n1 > n2: maior = n1 else: maior = n2 print('Entre os números", "elif opcao == 4: print('Informe novamento os números!!') n1 = int(input('Primeiro Valor: '))", "opcao != 5: print(''' [ 1 ] Somar [ 2 ] Multiplicar [", "novamento os números!!') n1 = int(input('Primeiro Valor: ')) n2 = int(input('Segundo valor: '))", "= int(input('Primeiro Valor: ')) n2 = int(input('Segundo valor: ')) elif opcao == 5:", 
"print('Entre os números {} e {} o maior é {}'.format(n1, n2, maior)) elif", "n1 print('O produto entre {} * {} = {}!'.format(n1, n2, produto)) elif opcao", "= n2 * n1 print('O produto entre {} * {} = {}!'.format(n1, n2,", "if n1 > n2: maior = n1 else: maior = n2 print('Entre os", "int(input('Segundo valor: ')) elif opcao == 5: print('Finalizando...') else: print('Opção inválida, tente outro", "os números!!') n1 = int(input('Primeiro Valor: ')) n2 = int(input('Segundo valor: ')) elif", "')) elif opcao == 5: print('Finalizando...') else: print('Opção inválida, tente outro número!') print('-=-'", "Números [ 5 ] Sair do programa''') opcao = int(input(\"Qual é a sua", "[ 5 ] Sair do programa''') opcao = int(input(\"Qual é a sua opção:", "do programa''') opcao = int(input(\"Qual é a sua opção: \")) if opcao ==", "produto)) elif opcao == 3: if n1 > n2: maior = n1 else:", "valor: ')) elif opcao == 5: print('Finalizando...') else: print('Opção inválida, tente outro número!')", "elif opcao == 2: produto = n2 * n1 print('O produto entre {}", "soma = n1 + n2 print('A soma entre {} + {} = {}!'.format(n1,", "opcao == 3: if n1 > n2: maior = n1 else: maior =", "opcao == 5: print('Finalizando...') else: print('Opção inválida, tente outro número!') print('-=-' * 20)", "int(input(\"Qual é a sua opção: \")) if opcao == 1: soma = n1", "{} e {} o maior é {}'.format(n1, n2, maior)) elif opcao == 4:", "int(input('Primeiro Valor: ')) n2 = int(input('Segundo valor: ')) elif opcao == 5: print('Finalizando...')", "if opcao == 1: soma = n1 + n2 print('A soma entre {}", "== 1: soma = n1 + n2 print('A soma entre {} + {}", "soma entre {} + {} = {}!'.format(n1, n2, soma)) elif opcao == 2:", "print('Finalizando...') else: print('Opção inválida, tente outro número!') print('-=-' * 20) sleep(2) print('FIM DO", "{} = {}!'.format(n1, n2, produto)) elif opcao == 3: if n1 > n2:", "programa''') opcao = int(input(\"Qual é a sua opção: \")) if opcao == 1:", "n2, maior)) elif opcao == 4: print('Informe 
novamento os números!!') n1 = int(input('Primeiro", "] Maior [ 4 ] Novos Números [ 5 ] Sair do programa''')", "o maior é {}'.format(n1, n2, maior)) elif opcao == 4: print('Informe novamento os", "é {}'.format(n1, n2, maior)) elif opcao == 4: print('Informe novamento os números!!') n1", "= n1 + n2 print('A soma entre {} + {} = {}!'.format(n1, n2,", "n2 = int(input('Segundo valor: ')) elif opcao == 5: print('Finalizando...') else: print('Opção inválida,", "a sua opção: \")) if opcao == 1: soma = n1 + n2", "] Sair do programa''') opcao = int(input(\"Qual é a sua opção: \")) if", "* n1 print('O produto entre {} * {} = {}!'.format(n1, n2, produto)) elif" ]
[ "activitypub.Note serialize_reverse_fields = [('attachments', 'attachment')] deserialize_reverse_fields = [('attachments', 'attachment')] #----- replies collection activitypub", "activity_serializer = activitypub.Comment pure_type = 'Note' class Quotation(Status): ''' like a review but", "activity['to'] = [followers] activity['cc'] = [public] + mentions elif self.privacy == 'followers': activity['to']", "''' self.user.last_active_date = timezone.now() self.user.save() super().save(*args, **kwargs) NotificationType = models.TextChoices( 'NotificationType', 'FAVORITE REPLY", "notification_type = models.CharField( max_length=255, choices=NotificationType.choices) class Meta: ''' checks if notifcation is in", "constraint can't work as it would cross tables. # class Meta: # unique_together", "collection activitypub ----# @classmethod def replies(cls, status): ''' load all replies to a", "w/e) users ''' message = self.content books = ', '.join( '<a href=\"%s\">\"%s\"</a>' %", "fields.DateTimeField( default=timezone.now, activitypub_field='published') deleted = models.BooleanField(default=False) deleted_date = models.DateTimeField(blank=True, null=True) favorites = models.ManyToManyField(", "''' user = models.ForeignKey('User', on_delete=models.PROTECT) book = models.ForeignKey('Book', on_delete=models.PROTECT) pages_read = models.IntegerField( null=True,", "''' return tombstone if the status is deleted ''' if self.deleted: return activitypub.Tombstone(", "pure=False): ''' return tombstone if the status is deleted ''' if self.deleted: return", "from bookwyrm import activitypub from .base_model import ActivitypubMixin, OrderedCollectionPageMixin from .base_model import BookWyrmModel,", "return tombstone if the status is deleted ''' if self.deleted: return activitypub.Tombstone( id=self.remote_id,", "user = fields.ForeignKey( 'User', on_delete=models.PROTECT, activitypub_field='actor') status = fields.ForeignKey( 'Status', on_delete=models.PROTECT, 
activitypub_field='object') activity_serializer", "checks if notifcation is in enum list for valid types ''' constraints =", "a better way to write this so it's just a property ''' return", ".base_model import BookWyrmModel, PrivacyLevels from . import fields from .fields import image_serializer class", "= [ image_serializer(b.cover) for b in self.mention_books.all() \\ if b.cover] if hasattr(self, 'book'):", "'Note' class Comment(Status): ''' like a review but without a rating and transient", "if pure: activity['content'] = self.pure_content if 'name' in activity: activity['name'] = self.pure_name activity['type']", "b in self.mention_books.all() \\ if b.cover] if hasattr(self, 'book'): activity['attachment'].append( image_serializer(self.book.cover) ) return", "activity ''' @property def pure_content(self): ''' indicate the book in question for mastodon", "user = models.ForeignKey('User', on_delete=models.PROTECT) related_book = models.ForeignKey( 'Edition', on_delete=models.PROTECT, null=True) related_user = models.ForeignKey(", "fields.ForeignKey( 'Status', on_delete=models.PROTECT, related_name='boosters', activitypub_field='object', ) activity_serializer = activitypub.Boost # This constraint can't", "related_user = models.ForeignKey( 'User', on_delete=models.PROTECT, null=True, related_name='related_user') related_status = models.ForeignKey( 'Status', on_delete=models.PROTECT, null=True)", "'direct': activity['to'] = mentions activity['cc'] = [] # \"pure\" serialization for non-bookwyrm instances", "of \"%s\" (%d stars): %s' % ( self.book.title, self.rating, self.name ) return 'Review", "Meta: ''' checks if notifcation is in enum list for valid types '''", "href=\"%s\">\"%s\"</a>' % (book.remote_id, book.title) \\ for book in self.mention_books.all() ) return '%s %s", "the ui using activity type ''' return self.activity_serializer.__name__ def to_replies(self, **kwargs): ''' helper", "update user active time ''' self.user.last_active_date = timezone.now() 
self.user.save() super().save(*args, **kwargs) class Meta:", "[followers] + mentions elif self.privacy == 'unlisted': activity['to'] = [followers] activity['cc'] = [public]", "books) activity_serializer = activitypub.GeneratedNote pure_type = 'Note' class Comment(Status): ''' like a review", "#----- replies collection activitypub ----# @classmethod def replies(cls, status): ''' load all replies", "''' return '\"%s\"<br>-- <a href=\"%s\">\"%s\"</a><br><br>%s' % ( self.quote, self.book.remote_id, self.book.title, self.content, ) activity_serializer", "messages about user activity ''' @property def pure_content(self): ''' indicate the book in", ") activity_serializer = activitypub.Boost # This constraint can't work as it would cross", "\\ for book in self.mention_books.all() ) return '%s %s %s' % (self.user.display_name, message,", "type of status for the ui using activity type ''' return self.activity_serializer.__name__ def", "= [public] + mentions elif self.privacy == 'followers': activity['to'] = [followers] activity['cc'] =", "related_name='related_user') related_status = models.ForeignKey( 'Status', on_delete=models.PROTECT, null=True) related_import = models.ForeignKey( 'ImportJob', on_delete=models.PROTECT, null=True)", "models.ManyToManyField( 'User', symmetrical=False, through='Favorite', through_fields=('status', 'user'), related_name='user_favorites' ) reply_parent = fields.ForeignKey( 'self', null=True,", "id=self.remote_id, url=self.remote_id, deleted=self.deleted_date.isoformat(), published=self.deleted_date.isoformat() ).serialize() activity = ActivitypubMixin.to_activity(self) activity['replies'] = self.to_replies() # privacy", "if the status is deleted ''' if self.deleted: return activitypub.Tombstone( id=self.remote_id, url=self.remote_id, deleted=self.deleted_date.isoformat(),", "super().save(*args, **kwargs) class GeneratedNote(Status): ''' these are app-generated messages about user activity '''", "''' user = fields.ForeignKey( 'User', 
on_delete=models.PROTECT, activitypub_field='actor') status = fields.ForeignKey( 'Status', on_delete=models.PROTECT, activitypub_field='object')", "u in self.mention_users.all()] # this is a link to the followers list: followers", "''' expose the type of status for the ui using activity type '''", "= self.pure_type activity['attachment'] = [ image_serializer(b.cover) for b in self.mention_books.all() \\ if b.cover]", "is in enum list for valid types ''' constraints = [ models.CheckConstraint( check=models.Q(notification_type__in=NotificationType.values),", "related_name='mention_book') local = models.BooleanField(default=True) privacy = models.CharField( max_length=255, default='public', choices=PrivacyLevels.choices ) sensitive =", "def status_type(self): ''' expose the type of status for the ui using activity", "replies to a status ''' return self.to_ordered_collection( self.replies(self), remote_id='%s/replies' % self.remote_id, **kwargs )", "import InheritanceManager from bookwyrm import activitypub from .base_model import ActivitypubMixin, OrderedCollectionPageMixin from .base_model", "replies(cls, status): ''' load all replies to a status. 
idk if there's a", "activitypub.Review pure_type = 'Article' class Favorite(ActivitypubMixin, BookWyrmModel): ''' fav'ing a post ''' user", "class Favorite(ActivitypubMixin, BookWyrmModel): ''' fav'ing a post ''' user = fields.ForeignKey( 'User', on_delete=models.PROTECT,", "in enum list for valid types ''' constraints = [ models.CheckConstraint( check=models.Q(notification_type__in=NotificationType.values), name=\"notification_type_valid\",", "''' update user active time ''' if self.user.local: self.user.last_active_date = timezone.now() self.user.save() return", "''' helper function for loading AP serialized replies to a status ''' return", "= models.BooleanField(default=False) deleted_date = models.DateTimeField(blank=True, null=True) favorites = models.ManyToManyField( 'User', symmetrical=False, through='Favorite', through_fields=('status',", "= fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') @property def pure_content(self): ''' indicate the book in", "can't be this, because of receiving federated posts published_date = fields.DateTimeField( default=timezone.now, activitypub_field='published')", "models.CharField( max_length=255, default='public', choices=PrivacyLevels.choices ) sensitive = fields.BooleanField(default=False) # the created date can't", "''' boosted_status = fields.ForeignKey( 'Status', on_delete=models.PROTECT, related_name='boosters', activitypub_field='object', ) activity_serializer = activitypub.Boost #", "on_delete=models.PROTECT, activitypub_field='object') activity_serializer = activitypub.Like def save(self, *args, **kwargs): ''' update user active", "django.utils import timezone from django.core.validators import MaxValueValidator, MinValueValidator from django.db import models from", "'Note' class Quotation(Status): ''' like a review but without a rating and transient", ".fields import image_serializer class Status(OrderedCollectionPageMixin, BookWyrmModel): ''' any post, like a reply to", "in 
question for mastodon (or w/e) users ''' return self.content + '<br><br>(comment on", "= models.ForeignKey( 'User', on_delete=models.PROTECT, null=True, related_name='related_user') related_status = models.ForeignKey( 'Status', on_delete=models.PROTECT, null=True) related_import", "followed, etc ''' user = models.ForeignKey('User', on_delete=models.PROTECT) related_book = models.ForeignKey( 'Edition', on_delete=models.PROTECT, null=True)", "= fields.ForeignKey( 'User', on_delete=models.PROTECT, activitypub_field='attributedTo') content = fields.TextField(blank=True, null=True) mention_users = fields.TagField('User', related_name='mention_user')", "self.book.title) activity_serializer = activitypub.Comment pure_type = 'Note' class Quotation(Status): ''' like a review", "time ''' self.user.last_active_date = timezone.now() self.user.save() super().save(*args, **kwargs) class Meta: ''' can't fav", "= 'Note' class Quotation(Status): ''' like a review but without a rating and", "max_length=255, default='public', choices=PrivacyLevels.choices ) sensitive = fields.BooleanField(default=False) # the created date can't be", "related_book = models.ForeignKey( 'Edition', on_delete=models.PROTECT, null=True) related_user = models.ForeignKey( 'User', on_delete=models.PROTECT, null=True, related_name='related_user')", "property ''' return cls.objects.filter(reply_parent=status).select_subclasses() @property def status_type(self): ''' expose the type of status", "users ''' return self.content + '<br><br>(comment on <a href=\"%s\">\"%s\"</a>)' % \\ (self.book.remote_id, self.book.title)", "IMPORT') class Notification(BookWyrmModel): ''' you've been tagged, liked, followed, etc ''' user =", "related_name='mention_user') mention_books = fields.TagField('Edition', related_name='mention_book') local = models.BooleanField(default=True) privacy = models.CharField( max_length=255, default='public',", "= fields.ForeignKey( 'User', on_delete=models.PROTECT, activitypub_field='actor') status 
= fields.ForeignKey( 'Status', on_delete=models.PROTECT, activitypub_field='object') activity_serializer =", "null=True) related_import = models.ForeignKey( 'ImportJob', on_delete=models.PROTECT, null=True) read = models.BooleanField(default=False) notification_type = models.CharField(", "a post ''' user = fields.ForeignKey( 'User', on_delete=models.PROTECT, activitypub_field='actor') status = fields.ForeignKey( 'Status',", "'attachment')] deserialize_reverse_fields = [('attachments', 'attachment')] #----- replies collection activitypub ----# @classmethod def replies(cls,", "self.pure_name activity['type'] = self.pure_type activity['attachment'] = [ image_serializer(b.cover) for b in self.mention_books.all() \\", "fav'ing a post ''' user = fields.ForeignKey( 'User', on_delete=models.PROTECT, activitypub_field='actor') status = fields.ForeignKey(", "null=True, blank=True, validators=[MinValueValidator(1), MaxValueValidator(5)] ) @property def pure_name(self): ''' clarify review names for", "'<a href=\"%s\">\"%s\"</a>' % (book.remote_id, book.title) \\ for book in self.mention_books.all() ) return '%s", "to the followers list: followers = self.user.__class__._meta.get_field('followers')\\ .field_to_activity(self.user.followers) if self.privacy == 'public': activity['to']", "timezone from django.core.validators import MaxValueValidator, MinValueValidator from django.db import models from model_utils.managers import", "''' message = self.content books = ', '.join( '<a href=\"%s\">\"%s\"</a>' % (book.remote_id, book.title)", "**kwargs) class GeneratedNote(Status): ''' these are app-generated messages about user activity ''' @property", "% \\ (self.book.remote_id, self.book.title) activity_serializer = activitypub.Comment pure_type = 'Note' class Quotation(Status): '''", "from django.db import models from model_utils.managers import InheritanceManager from bookwyrm import activitypub from", "but without a rating and transient ''' book = fields.ForeignKey( 'Edition', 
on_delete=models.PROTECT, activitypub_field='inReplyToBook')", "', '.join( '<a href=\"%s\">\"%s\"</a>' % (book.remote_id, book.title) \\ for book in self.mention_books.all() )", "list for valid types ''' constraints = [ models.CheckConstraint( check=models.Q(notification_type__in=NotificationType.values), name=\"notification_type_valid\", ) ]", "= [u.remote_id for u in self.mention_users.all()] # this is a link to the", "= models.BooleanField(default=False) notification_type = models.CharField( max_length=255, choices=NotificationType.choices) class Meta: ''' checks if notifcation", "\\ (self.book.remote_id, self.book.title) activity_serializer = activitypub.Review pure_type = 'Article' class Favorite(ActivitypubMixin, BookWyrmModel): '''", "''' return cls.objects.filter(reply_parent=status).select_subclasses() @property def status_type(self): ''' expose the type of status for", "''' user = fields.ForeignKey( 'User', on_delete=models.PROTECT, activitypub_field='attributedTo') content = fields.TextField(blank=True, null=True) mention_users =", "return 'Review of \"%s\": %s' % ( self.book.title, self.name ) @property def pure_content(self):", "'<br><br>(comment on <a href=\"%s\">\"%s\"</a>)' % \\ (self.book.remote_id, self.book.title) activity_serializer = activitypub.Comment pure_type =", "''' load all replies to a status. 
idk if there's a better way", "BookWyrmModel): ''' fav'ing a post ''' user = fields.ForeignKey( 'User', on_delete=models.PROTECT, activitypub_field='actor') status", "Status(OrderedCollectionPageMixin, BookWyrmModel): ''' any post, like a reply to a review, etc '''", "deleted=self.deleted_date.isoformat(), published=self.deleted_date.isoformat() ).serialize() activity = ActivitypubMixin.to_activity(self) activity['replies'] = self.to_replies() # privacy controls public", "= fields.TextField() book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') @property def pure_content(self): ''' indicate", "''' name = fields.CharField(max_length=255, null=True) book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') rating =", "( self.book.title, self.rating, self.name ) return 'Review of \"%s\": %s' % ( self.book.title,", "'self', null=True, on_delete=models.PROTECT, activitypub_field='inReplyTo', ) objects = InheritanceManager() activity_serializer = activitypub.Note serialize_reverse_fields =", "using activity type ''' return self.activity_serializer.__name__ def to_replies(self, **kwargs): ''' helper function for", "Comment(Status): ''' like a review but without a rating and transient ''' book", "on_delete=models.PROTECT, activitypub_field='inReplyTo', ) objects = InheritanceManager() activity_serializer = activitypub.Note serialize_reverse_fields = [('attachments', 'attachment')]", "way to write this so it's just a property ''' return cls.objects.filter(reply_parent=status).select_subclasses() @property", "''' these are app-generated messages about user activity ''' @property def pure_content(self): '''", "rating and transient ''' quote = fields.TextField() book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook')", "class Notification(BookWyrmModel): ''' you've been tagged, liked, followed, etc ''' user = models.ForeignKey('User',", 
"on_delete=models.PROTECT, related_name='boosters', activitypub_field='object', ) activity_serializer = activitypub.Boost # This constraint can't work as", "status is deleted ''' if self.deleted: return activitypub.Tombstone( id=self.remote_id, url=self.remote_id, deleted=self.deleted_date.isoformat(), published=self.deleted_date.isoformat() ).serialize()", "non-bookwyrm instances if pure: activity['content'] = self.pure_content if 'name' in activity: activity['name'] =", "import image_serializer class Status(OrderedCollectionPageMixin, BookWyrmModel): ''' any post, like a reply to a", "Notification(BookWyrmModel): ''' you've been tagged, liked, followed, etc ''' user = models.ForeignKey('User', on_delete=models.PROTECT)", "activitypub_field='inReplyTo', ) objects = InheritanceManager() activity_serializer = activitypub.Note serialize_reverse_fields = [('attachments', 'attachment')] deserialize_reverse_fields", "Meta: ''' can't fav things twice ''' unique_together = ('user', 'status') class Boost(Status):", "Boost(Status): ''' boost'ing a post ''' boosted_status = fields.ForeignKey( 'Status', on_delete=models.PROTECT, related_name='boosters', activitypub_field='object',", ".field_to_activity(self.user.followers) if self.privacy == 'public': activity['to'] = [public] activity['cc'] = [followers] + mentions", "self.book.title) activity_serializer = activitypub.Review pure_type = 'Article' class Favorite(ActivitypubMixin, BookWyrmModel): ''' fav'ing a", "for u in self.mention_users.all()] # this is a link to the followers list:", "activitypub_field='published') deleted = models.BooleanField(default=False) deleted_date = models.DateTimeField(blank=True, null=True) favorites = models.ManyToManyField( 'User', symmetrical=False,", "etc ''' user = fields.ForeignKey( 'User', on_delete=models.PROTECT, activitypub_field='attributedTo') content = fields.TextField(blank=True, null=True) mention_users", "elif self.privacy == 'followers': activity['to'] = [followers] 
activity['cc'] = mentions if self.privacy ==", "w/e) users ''' return self.content + '<br><br>(comment on <a href=\"%s\">\"%s\"</a>)' % \\ (self.book.remote_id,", "for mastodon (or w/e) users ''' return self.content + '<br><br>(<a href=\"%s\">\"%s\"</a>)' % \\", "PrivacyLevels from . import fields from .fields import image_serializer class Status(OrderedCollectionPageMixin, BookWyrmModel): '''", ") activity_serializer = activitypub.Quotation pure_type = 'Note' class Review(Status): ''' a book review", "controls public = 'https://www.w3.org/ns/activitystreams#Public' mentions = [u.remote_id for u in self.mention_users.all()] # this", "null=True) finish_date = models.DateTimeField( blank=True, null=True) def save(self, *args, **kwargs): ''' update user", "book in self.mention_books.all() ) return '%s %s %s' % (self.user.display_name, message, books) activity_serializer", "Review(Status): ''' a book review ''' name = fields.CharField(max_length=255, null=True) book = fields.ForeignKey(", "a book in the database. ''' user = models.ForeignKey('User', on_delete=models.PROTECT) book = models.ForeignKey('Book',", "a book review ''' name = fields.CharField(max_length=255, null=True) book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT,", "status. idk if there's a better way to write this so it's just", "models.IntegerField( null=True, blank=True) start_date = models.DateTimeField( blank=True, null=True) finish_date = models.DateTimeField( blank=True, null=True)", "is deleted ''' if self.deleted: return activitypub.Tombstone( id=self.remote_id, url=self.remote_id, deleted=self.deleted_date.isoformat(), published=self.deleted_date.isoformat() ).serialize() activity", "= 'Note' class Comment(Status): ''' like a review but without a rating and", "to a status. 
idk if there's a better way to write this so", "content = fields.TextField(blank=True, null=True) mention_users = fields.TagField('User', related_name='mention_user') mention_books = fields.TagField('Edition', related_name='mention_book') local", "save(self, *args, **kwargs): ''' update user active time ''' self.user.last_active_date = timezone.now() self.user.save()", "but without a rating and transient ''' quote = fields.TextField() book = fields.ForeignKey(", "= fields.TagField('User', related_name='mention_user') mention_books = fields.TagField('Edition', related_name='mention_book') local = models.BooleanField(default=True) privacy = models.CharField(", "model_utils.managers import InheritanceManager from bookwyrm import activitypub from .base_model import ActivitypubMixin, OrderedCollectionPageMixin from", "fav things twice ''' unique_together = ('user', 'status') class Boost(Status): ''' boost'ing a", "return self.activity_serializer.__name__ def to_replies(self, **kwargs): ''' helper function for loading AP serialized replies", "privacy = models.CharField( max_length=255, default='public', choices=PrivacyLevels.choices ) sensitive = fields.BooleanField(default=False) # the created", "models.ForeignKey('User', on_delete=models.PROTECT) book = models.ForeignKey('Book', on_delete=models.PROTECT) pages_read = models.IntegerField( null=True, blank=True) start_date =", "class Meta: ''' can't fav things twice ''' unique_together = ('user', 'status') class", "models.BooleanField(default=False) notification_type = models.CharField( max_length=255, choices=NotificationType.choices) class Meta: ''' checks if notifcation is", "''' boost'ing a post ''' boosted_status = fields.ForeignKey( 'Status', on_delete=models.PROTECT, related_name='boosters', activitypub_field='object', )", "# class Meta: # unique_together = ('user', 'boosted_status') class ReadThrough(BookWyrmModel): ''' Store progress", "models for storing different kinds of Activities ''' from django.utils 
import timezone from", "write this so it's just a property ''' return cls.objects.filter(reply_parent=status).select_subclasses() @property def status_type(self):", "# This constraint can't work as it would cross tables. # class Meta:", "tagged, liked, followed, etc ''' user = models.ForeignKey('User', on_delete=models.PROTECT) related_book = models.ForeignKey( 'Edition',", "b.cover] if hasattr(self, 'book'): activity['attachment'].append( image_serializer(self.book.cover) ) return activity def save(self, *args, **kwargs):", "on_delete=models.PROTECT, activitypub_field='attributedTo') content = fields.TextField(blank=True, null=True) mention_users = fields.TagField('User', related_name='mention_user') mention_books = fields.TagField('Edition',", ") @property def pure_name(self): ''' clarify review names for mastodon serialization ''' if", "% ( self.book.title, self.name ) @property def pure_content(self): ''' indicate the book in", "\\ if b.cover] if hasattr(self, 'book'): activity['attachment'].append( image_serializer(self.book.cover) ) return activity def save(self,", "= 'Note' class Review(Status): ''' a book review ''' name = fields.CharField(max_length=255, null=True)", "mentions = [u.remote_id for u in self.mention_users.all()] # this is a link to", "things twice ''' unique_together = ('user', 'status') class Boost(Status): ''' boost'ing a post", "self.quote, self.book.remote_id, self.book.title, self.content, ) activity_serializer = activitypub.Quotation pure_type = 'Note' class Review(Status):", "choices=NotificationType.choices) class Meta: ''' checks if notifcation is in enum list for valid", "replies to a status. 
idk if there's a better way to write this", "(%d stars): %s' % ( self.book.title, self.rating, self.name ) return 'Review of \"%s\":", "transient ''' book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') @property def pure_content(self): ''' indicate", "etc ''' user = models.ForeignKey('User', on_delete=models.PROTECT) related_book = models.ForeignKey( 'Edition', on_delete=models.PROTECT, null=True) related_user", "start_date = models.DateTimeField( blank=True, null=True) finish_date = models.DateTimeField( blank=True, null=True) def save(self, *args,", "self.mention_users.all()] # this is a link to the followers list: followers = self.user.__class__._meta.get_field('followers')\\", "models.ForeignKey( 'ImportJob', on_delete=models.PROTECT, null=True) read = models.BooleanField(default=False) notification_type = models.CharField( max_length=255, choices=NotificationType.choices) class", "''' fav'ing a post ''' user = fields.ForeignKey( 'User', on_delete=models.PROTECT, activitypub_field='actor') status =", "**kwargs) class Meta: ''' can't fav things twice ''' unique_together = ('user', 'status')", "on_delete=models.PROTECT, null=True) read = models.BooleanField(default=False) notification_type = models.CharField( max_length=255, choices=NotificationType.choices) class Meta: '''", "if 'name' in activity: activity['name'] = self.pure_name activity['type'] = self.pure_type activity['attachment'] = [", "time ''' self.user.last_active_date = timezone.now() self.user.save() super().save(*args, **kwargs) NotificationType = models.TextChoices( 'NotificationType', 'FAVORITE", "loading AP serialized replies to a status ''' return self.to_ordered_collection( self.replies(self), remote_id='%s/replies' %", ") def to_activity(self, pure=False): ''' return tombstone if the status is deleted '''", "serialized replies to a status ''' return self.to_ordered_collection( self.replies(self), remote_id='%s/replies' % self.remote_id, **kwargs", 
"self.user.local: self.user.last_active_date = timezone.now() self.user.save() return super().save(*args, **kwargs) class GeneratedNote(Status): ''' these are", "Store progress through a book in the database. ''' user = models.ForeignKey('User', on_delete=models.PROTECT)", "mastodon (or w/e) users ''' return self.content + '<br><br>(comment on <a href=\"%s\">\"%s\"</a>)' %", "import timezone from django.core.validators import MaxValueValidator, MinValueValidator from django.db import models from model_utils.managers", "= 'https://www.w3.org/ns/activitystreams#Public' mentions = [u.remote_id for u in self.mention_users.all()] # this is a", "objects = InheritanceManager() activity_serializer = activitypub.Note serialize_reverse_fields = [('attachments', 'attachment')] deserialize_reverse_fields = [('attachments',", "BookWyrmModel, PrivacyLevels from . import fields from .fields import image_serializer class Status(OrderedCollectionPageMixin, BookWyrmModel):", "of receiving federated posts published_date = fields.DateTimeField( default=timezone.now, activitypub_field='published') deleted = models.BooleanField(default=False) deleted_date", "self.user.last_active_date = timezone.now() self.user.save() super().save(*args, **kwargs) NotificationType = models.TextChoices( 'NotificationType', 'FAVORITE REPLY MENTION", "import activitypub from .base_model import ActivitypubMixin, OrderedCollectionPageMixin from .base_model import BookWyrmModel, PrivacyLevels from", "activity['cc'] = [followers] + mentions elif self.privacy == 'unlisted': activity['to'] = [followers] activity['cc']", "= activitypub.GeneratedNote pure_type = 'Note' class Comment(Status): ''' like a review but without", "activitypub.GeneratedNote pure_type = 'Note' class Comment(Status): ''' like a review but without a", "fields.TagField('User', related_name='mention_user') mention_books = fields.TagField('Edition', related_name='mention_book') local = models.BooleanField(default=True) privacy = 
models.CharField( max_length=255,", "def pure_name(self): ''' clarify review names for mastodon serialization ''' if self.rating: return", "the book in question for mastodon (or w/e) users ''' return '\"%s\"<br>-- <a", "from django.utils import timezone from django.core.validators import MaxValueValidator, MinValueValidator from django.db import models", "be this, because of receiving federated posts published_date = fields.DateTimeField( default=timezone.now, activitypub_field='published') deleted", "mentions elif self.privacy == 'unlisted': activity['to'] = [followers] activity['cc'] = [public] + mentions", "in question for mastodon (or w/e) users ''' message = self.content books =", "self.pure_type activity['attachment'] = [ image_serializer(b.cover) for b in self.mention_books.all() \\ if b.cover] if", "null=True) favorites = models.ManyToManyField( 'User', symmetrical=False, through='Favorite', through_fields=('status', 'user'), related_name='user_favorites' ) reply_parent =", "def pure_content(self): ''' indicate the book in question for mastodon (or w/e) users", "book in the database. 
''' user = models.ForeignKey('User', on_delete=models.PROTECT) book = models.ForeignKey('Book', on_delete=models.PROTECT)", "''' quote = fields.TextField() book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') @property def pure_content(self):", "\\ (self.book.remote_id, self.book.title) activity_serializer = activitypub.Comment pure_type = 'Note' class Quotation(Status): ''' like", "'followers': activity['to'] = [followers] activity['cc'] = mentions if self.privacy == 'direct': activity['to'] =", "published=self.deleted_date.isoformat() ).serialize() activity = ActivitypubMixin.to_activity(self) activity['replies'] = self.to_replies() # privacy controls public =", "helper function for loading AP serialized replies to a status ''' return self.to_ordered_collection(", "= models.IntegerField( null=True, blank=True) start_date = models.DateTimeField( blank=True, null=True) finish_date = models.DateTimeField( blank=True,", "'Edition', on_delete=models.PROTECT, null=True) related_user = models.ForeignKey( 'User', on_delete=models.PROTECT, null=True, related_name='related_user') related_status = models.ForeignKey(", "MinValueValidator from django.db import models from model_utils.managers import InheritanceManager from bookwyrm import activitypub", "models.ForeignKey('Book', on_delete=models.PROTECT) pages_read = models.IntegerField( null=True, blank=True) start_date = models.DateTimeField( blank=True, null=True) finish_date", "\"pure\" serialization for non-bookwyrm instances if pure: activity['content'] = self.pure_content if 'name' in", "timezone.now() self.user.save() super().save(*args, **kwargs) NotificationType = models.TextChoices( 'NotificationType', 'FAVORITE REPLY MENTION TAG FOLLOW", "activity['replies'] = self.to_replies() # privacy controls public = 'https://www.w3.org/ns/activitystreams#Public' mentions = [u.remote_id for", "''' if self.user.local: self.user.last_active_date = timezone.now() self.user.save() 
return super().save(*args, **kwargs) class GeneratedNote(Status): '''", "models.BooleanField(default=False) deleted_date = models.DateTimeField(blank=True, null=True) favorites = models.ManyToManyField( 'User', symmetrical=False, through='Favorite', through_fields=('status', 'user'),", "@property def pure_content(self): ''' indicate the book in question for mastodon (or w/e)", "= activitypub.Like def save(self, *args, **kwargs): ''' update user active time ''' self.user.last_active_date", "related_import = models.ForeignKey( 'ImportJob', on_delete=models.PROTECT, null=True) read = models.BooleanField(default=False) notification_type = models.CharField( max_length=255,", "self.user.save() super().save(*args, **kwargs) class Meta: ''' can't fav things twice ''' unique_together =", "self.replies(self), remote_id='%s/replies' % self.remote_id, **kwargs ) def to_activity(self, pure=False): ''' return tombstone if", "blank=True, validators=[MinValueValidator(1), MaxValueValidator(5)] ) @property def pure_name(self): ''' clarify review names for mastodon", "def to_activity(self, pure=False): ''' return tombstone if the status is deleted ''' if", "and transient ''' book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') @property def pure_content(self): '''", "mastodon (or w/e) users ''' message = self.content books = ', '.join( '<a", "self.to_replies() # privacy controls public = 'https://www.w3.org/ns/activitystreams#Public' mentions = [u.remote_id for u in", "from model_utils.managers import InheritanceManager from bookwyrm import activitypub from .base_model import ActivitypubMixin, OrderedCollectionPageMixin", "= fields.ForeignKey( 'self', null=True, on_delete=models.PROTECT, activitypub_field='inReplyTo', ) objects = InheritanceManager() activity_serializer = activitypub.Note", "review ''' name = fields.CharField(max_length=255, null=True) book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, 
activitypub_field='inReplyToBook') rating", "models.DateTimeField( blank=True, null=True) def save(self, *args, **kwargs): ''' update user active time '''", "pure: activity['content'] = self.pure_content if 'name' in activity: activity['name'] = self.pure_name activity['type'] =", "in self.mention_books.all() ) return '%s %s %s' % (self.user.display_name, message, books) activity_serializer =", "self.user.save() super().save(*args, **kwargs) NotificationType = models.TextChoices( 'NotificationType', 'FAVORITE REPLY MENTION TAG FOLLOW FOLLOW_REQUEST", "save(self, *args, **kwargs): ''' update user active time ''' if self.user.local: self.user.last_active_date =", "transient ''' quote = fields.TextField() book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') @property def", "users ''' return self.content + '<br><br>(<a href=\"%s\">\"%s\"</a>)' % \\ (self.book.remote_id, self.book.title) activity_serializer =", "@property def status_type(self): ''' expose the type of status for the ui using", "in question for mastodon (or w/e) users ''' return '\"%s\"<br>-- <a href=\"%s\">\"%s\"</a><br><br>%s' %", "reply to a review, etc ''' user = fields.ForeignKey( 'User', on_delete=models.PROTECT, activitypub_field='attributedTo') content", "= [('attachments', 'attachment')] #----- replies collection activitypub ----# @classmethod def replies(cls, status): '''", "= [followers] activity['cc'] = mentions if self.privacy == 'direct': activity['to'] = mentions activity['cc']", "on_delete=models.PROTECT, activitypub_field='inReplyToBook') rating = fields.IntegerField( default=None, null=True, blank=True, validators=[MinValueValidator(1), MaxValueValidator(5)] ) @property def", "user = models.ForeignKey('User', on_delete=models.PROTECT) book = models.ForeignKey('Book', on_delete=models.PROTECT) pages_read = models.IntegerField( null=True, blank=True)", "the created date can't be this, because of receiving federated posts published_date =", 
"models.ForeignKey('User', on_delete=models.PROTECT) related_book = models.ForeignKey( 'Edition', on_delete=models.PROTECT, null=True) related_user = models.ForeignKey( 'User', on_delete=models.PROTECT,", "receiving federated posts published_date = fields.DateTimeField( default=timezone.now, activitypub_field='published') deleted = models.BooleanField(default=False) deleted_date =", "activity_serializer = activitypub.Note serialize_reverse_fields = [('attachments', 'attachment')] deserialize_reverse_fields = [('attachments', 'attachment')] #----- replies", "followers = self.user.__class__._meta.get_field('followers')\\ .field_to_activity(self.user.followers) if self.privacy == 'public': activity['to'] = [public] activity['cc'] =", "active time ''' if self.user.local: self.user.last_active_date = timezone.now() self.user.save() return super().save(*args, **kwargs) class", "import fields from .fields import image_serializer class Status(OrderedCollectionPageMixin, BookWyrmModel): ''' any post, like", "'User', symmetrical=False, through='Favorite', through_fields=('status', 'user'), related_name='user_favorites' ) reply_parent = fields.ForeignKey( 'self', null=True, on_delete=models.PROTECT,", "*args, **kwargs): ''' update user active time ''' self.user.last_active_date = timezone.now() self.user.save() super().save(*args,", "blank=True, null=True) finish_date = models.DateTimeField( blank=True, null=True) def save(self, *args, **kwargs): ''' update", "from . 
import fields from .fields import image_serializer class Status(OrderedCollectionPageMixin, BookWyrmModel): ''' any", ").serialize() activity = ActivitypubMixin.to_activity(self) activity['replies'] = self.to_replies() # privacy controls public = 'https://www.w3.org/ns/activitystreams#Public'", "fields.TagField('Edition', related_name='mention_book') local = models.BooleanField(default=True) privacy = models.CharField( max_length=255, default='public', choices=PrivacyLevels.choices ) sensitive", "( self.quote, self.book.remote_id, self.book.title, self.content, ) activity_serializer = activitypub.Quotation pure_type = 'Note' class", "cls.objects.filter(reply_parent=status).select_subclasses() @property def status_type(self): ''' expose the type of status for the ui", "like a reply to a review, etc ''' user = fields.ForeignKey( 'User', on_delete=models.PROTECT,", "a review but without a rating and transient ''' quote = fields.TextField() book", "posts published_date = fields.DateTimeField( default=timezone.now, activitypub_field='published') deleted = models.BooleanField(default=False) deleted_date = models.DateTimeField(blank=True, null=True)", "the type of status for the ui using activity type ''' return self.activity_serializer.__name__", "% (self.user.display_name, message, books) activity_serializer = activitypub.GeneratedNote pure_type = 'Note' class Comment(Status): '''", "on_delete=models.PROTECT) book = models.ForeignKey('Book', on_delete=models.PROTECT) pages_read = models.IntegerField( null=True, blank=True) start_date = models.DateTimeField(", "as it would cross tables. 
# class Meta: # unique_together = ('user', 'boosted_status')", "'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') @property def pure_content(self): ''' indicate the book in question for", "active time ''' self.user.last_active_date = timezone.now() self.user.save() super().save(*args, **kwargs) NotificationType = models.TextChoices( 'NotificationType',", "for mastodon (or w/e) users ''' message = self.content books = ', '.join(", "been tagged, liked, followed, etc ''' user = models.ForeignKey('User', on_delete=models.PROTECT) related_book = models.ForeignKey(", "on_delete=models.PROTECT, null=True) related_user = models.ForeignKey( 'User', on_delete=models.PROTECT, null=True, related_name='related_user') related_status = models.ForeignKey( 'Status',", "import ActivitypubMixin, OrderedCollectionPageMixin from .base_model import BookWyrmModel, PrivacyLevels from . import fields from", "''' any post, like a reply to a review, etc ''' user =", "clarify review names for mastodon serialization ''' if self.rating: return 'Review of \"%s\"", "storing different kinds of Activities ''' from django.utils import timezone from django.core.validators import", "fields.IntegerField( default=None, null=True, blank=True, validators=[MinValueValidator(1), MaxValueValidator(5)] ) @property def pure_name(self): ''' clarify review", "books = ', '.join( '<a href=\"%s\">\"%s\"</a>' % (book.remote_id, book.title) \\ for book in", "instances if pure: activity['content'] = self.pure_content if 'name' in activity: activity['name'] = self.pure_name", "active time ''' self.user.last_active_date = timezone.now() self.user.save() super().save(*args, **kwargs) class Meta: ''' can't", "activity def save(self, *args, **kwargs): ''' update user active time ''' if self.user.local:", "self.activity_serializer.__name__ def to_replies(self, **kwargs): ''' helper function for loading AP serialized replies to", "indicate the book in question for mastodon (or w/e) users ''' message =", 
"class ReadThrough(BookWyrmModel): ''' Store progress through a book in the database. ''' user", "self.user.last_active_date = timezone.now() self.user.save() return super().save(*args, **kwargs) class GeneratedNote(Status): ''' these are app-generated", "= timezone.now() self.user.save() super().save(*args, **kwargs) NotificationType = models.TextChoices( 'NotificationType', 'FAVORITE REPLY MENTION TAG", "pure_type = 'Note' class Comment(Status): ''' like a review but without a rating", "status for the ui using activity type ''' return self.activity_serializer.__name__ def to_replies(self, **kwargs):", "*args, **kwargs): ''' update user active time ''' if self.user.local: self.user.last_active_date = timezone.now()", "activity_serializer = activitypub.Review pure_type = 'Article' class Favorite(ActivitypubMixin, BookWyrmModel): ''' fav'ing a post", "mastodon (or w/e) users ''' return self.content + '<br><br>(<a href=\"%s\">\"%s\"</a>)' % \\ (self.book.remote_id,", "deleted_date = models.DateTimeField(blank=True, null=True) favorites = models.ManyToManyField( 'User', symmetrical=False, through='Favorite', through_fields=('status', 'user'), related_name='user_favorites'", "\"%s\" (%d stars): %s' % ( self.book.title, self.rating, self.name ) return 'Review of", "= activitypub.Review pure_type = 'Article' class Favorite(ActivitypubMixin, BookWyrmModel): ''' fav'ing a post '''", "[followers] activity['cc'] = mentions if self.privacy == 'direct': activity['to'] = mentions activity['cc'] =", "(or w/e) users ''' return self.content + '<br><br>(comment on <a href=\"%s\">\"%s\"</a>)' % \\", "''' unique_together = ('user', 'status') class Boost(Status): ''' boost'ing a post ''' boosted_status", "deleted = models.BooleanField(default=False) deleted_date = models.DateTimeField(blank=True, null=True) favorites = models.ManyToManyField( 'User', symmetrical=False, through='Favorite',", "rating = fields.IntegerField( default=None, null=True, blank=True, 
validators=[MinValueValidator(1), MaxValueValidator(5)] ) @property def pure_name(self): '''", "local = models.BooleanField(default=True) privacy = models.CharField( max_length=255, default='public', choices=PrivacyLevels.choices ) sensitive = fields.BooleanField(default=False)", "image_serializer(self.book.cover) ) return activity def save(self, *args, **kwargs): ''' update user active time", "for book in self.mention_books.all() ) return '%s %s %s' % (self.user.display_name, message, books)", "book in question for mastodon (or w/e) users ''' message = self.content books", "= timezone.now() self.user.save() return super().save(*args, **kwargs) class GeneratedNote(Status): ''' these are app-generated messages", "(self.book.remote_id, self.book.title) activity_serializer = activitypub.Review pure_type = 'Article' class Favorite(ActivitypubMixin, BookWyrmModel): ''' fav'ing", "review, etc ''' user = fields.ForeignKey( 'User', on_delete=models.PROTECT, activitypub_field='attributedTo') content = fields.TextField(blank=True, null=True)", "'ImportJob', on_delete=models.PROTECT, null=True) read = models.BooleanField(default=False) notification_type = models.CharField( max_length=255, choices=NotificationType.choices) class Meta:", "book.title) \\ for book in self.mention_books.all() ) return '%s %s %s' % (self.user.display_name,", "self.book.remote_id, self.book.title, self.content, ) activity_serializer = activitypub.Quotation pure_type = 'Note' class Review(Status): '''", "GeneratedNote(Status): ''' these are app-generated messages about user activity ''' @property def pure_content(self):", "% \\ (self.book.remote_id, self.book.title) activity_serializer = activitypub.Review pure_type = 'Article' class Favorite(ActivitypubMixin, BookWyrmModel):", "null=True) mention_users = fields.TagField('User', related_name='mention_user') mention_books = fields.TagField('Edition', related_name='mention_book') local = models.BooleanField(default=True) privacy", 
"activitypub_field='actor') status = fields.ForeignKey( 'Status', on_delete=models.PROTECT, activitypub_field='object') activity_serializer = activitypub.Like def save(self, *args,", "''' book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') @property def pure_content(self): ''' indicate the", "[followers] activity['cc'] = [public] + mentions elif self.privacy == 'followers': activity['to'] = [followers]", "self.rating, self.name ) return 'Review of \"%s\": %s' % ( self.book.title, self.name )", "on_delete=models.PROTECT, activitypub_field='actor') status = fields.ForeignKey( 'Status', on_delete=models.PROTECT, activitypub_field='object') activity_serializer = activitypub.Like def save(self,", "= models.CharField( max_length=255, choices=NotificationType.choices) class Meta: ''' checks if notifcation is in enum", "pure_type = 'Article' class Favorite(ActivitypubMixin, BookWyrmModel): ''' fav'ing a post ''' user =", "stars): %s' % ( self.book.title, self.rating, self.name ) return 'Review of \"%s\": %s'", "= timezone.now() self.user.save() super().save(*args, **kwargs) class Meta: ''' can't fav things twice '''", "finish_date = models.DateTimeField( blank=True, null=True) def save(self, *args, **kwargs): ''' update user active", "''' indicate the book in question for mastodon (or w/e) users ''' message", "this is a link to the followers list: followers = self.user.__class__._meta.get_field('followers')\\ .field_to_activity(self.user.followers) if", "= mentions activity['cc'] = [] # \"pure\" serialization for non-bookwyrm instances if pure:", "mastodon (or w/e) users ''' return '\"%s\"<br>-- <a href=\"%s\">\"%s\"</a><br><br>%s' % ( self.quote, self.book.remote_id,", "% ( self.book.title, self.rating, self.name ) return 'Review of \"%s\": %s' % (", "if self.privacy == 'public': activity['to'] = [public] activity['cc'] = [followers] + mentions elif", "[public] activity['cc'] = [followers] + mentions elif self.privacy == 
'unlisted': activity['to'] = [followers]", "activitypub.Boost # This constraint can't work as it would cross tables. # class", "@classmethod def replies(cls, status): ''' load all replies to a status. idk if", "self.mention_books.all() \\ if b.cover] if hasattr(self, 'book'): activity['attachment'].append( image_serializer(self.book.cover) ) return activity def", "of \"%s\": %s' % ( self.book.title, self.name ) @property def pure_content(self): ''' indicate", "if self.rating: return 'Review of \"%s\" (%d stars): %s' % ( self.book.title, self.rating,", "this, because of receiving federated posts published_date = fields.DateTimeField( default=timezone.now, activitypub_field='published') deleted =", "deleted ''' if self.deleted: return activitypub.Tombstone( id=self.remote_id, url=self.remote_id, deleted=self.deleted_date.isoformat(), published=self.deleted_date.isoformat() ).serialize() activity =", "null=True) def save(self, *args, **kwargs): ''' update user active time ''' self.user.last_active_date =", "''' like a review but without a rating and transient ''' quote =", "activity['type'] = self.pure_type activity['attachment'] = [ image_serializer(b.cover) for b in self.mention_books.all() \\ if", "activity['attachment'].append( image_serializer(self.book.cover) ) return activity def save(self, *args, **kwargs): ''' update user active", "on_delete=models.PROTECT, null=True) related_import = models.ForeignKey( 'ImportJob', on_delete=models.PROTECT, null=True) read = models.BooleanField(default=False) notification_type =", "indicate the book in question for mastodon (or w/e) users ''' return '\"%s\"<br>--", "[public] + mentions elif self.privacy == 'followers': activity['to'] = [followers] activity['cc'] = mentions", "expose the type of status for the ui using activity type ''' return", "progress through a book in the database. 
''' user = models.ForeignKey('User', on_delete=models.PROTECT) book", "''' clarify review names for mastodon serialization ''' if self.rating: return 'Review of", "message, books) activity_serializer = activitypub.GeneratedNote pure_type = 'Note' class Comment(Status): ''' like a", "= models.ManyToManyField( 'User', symmetrical=False, through='Favorite', through_fields=('status', 'user'), related_name='user_favorites' ) reply_parent = fields.ForeignKey( 'self',", "book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') rating = fields.IntegerField( default=None, null=True, blank=True, validators=[MinValueValidator(1),", "return '%s %s %s' % (self.user.display_name, message, books) activity_serializer = activitypub.GeneratedNote pure_type =", "null=True) related_user = models.ForeignKey( 'User', on_delete=models.PROTECT, null=True, related_name='related_user') related_status = models.ForeignKey( 'Status', on_delete=models.PROTECT,", "models.BooleanField(default=True) privacy = models.CharField( max_length=255, default='public', choices=PrivacyLevels.choices ) sensitive = fields.BooleanField(default=False) # the", "= activitypub.Boost # This constraint can't work as it would cross tables. 
#", "activity['to'] = [followers] activity['cc'] = mentions if self.privacy == 'direct': activity['to'] = mentions", "for non-bookwyrm instances if pure: activity['content'] = self.pure_content if 'name' in activity: activity['name']", "to write this so it's just a property ''' return cls.objects.filter(reply_parent=status).select_subclasses() @property def", "public = 'https://www.w3.org/ns/activitystreams#Public' mentions = [u.remote_id for u in self.mention_users.all()] # this is", "post ''' boosted_status = fields.ForeignKey( 'Status', on_delete=models.PROTECT, related_name='boosters', activitypub_field='object', ) activity_serializer = activitypub.Boost", "to a status ''' return self.to_ordered_collection( self.replies(self), remote_id='%s/replies' % self.remote_id, **kwargs ) def", "return self.to_ordered_collection( self.replies(self), remote_id='%s/replies' % self.remote_id, **kwargs ) def to_activity(self, pure=False): ''' return", "indicate the book in question for mastodon (or w/e) users ''' return self.content", "''' return self.content + '<br><br>(comment on <a href=\"%s\">\"%s\"</a>)' % \\ (self.book.remote_id, self.book.title) activity_serializer", "fields.ForeignKey( 'Status', on_delete=models.PROTECT, activitypub_field='object') activity_serializer = activitypub.Like def save(self, *args, **kwargs): ''' update", "null=True, on_delete=models.PROTECT, activitypub_field='inReplyTo', ) objects = InheritanceManager() activity_serializer = activitypub.Note serialize_reverse_fields = [('attachments',", "== 'followers': activity['to'] = [followers] activity['cc'] = mentions if self.privacy == 'direct': activity['to']", "''' self.user.last_active_date = timezone.now() self.user.save() super().save(*args, **kwargs) class Meta: ''' can't fav things", "= ', '.join( '<a href=\"%s\">\"%s\"</a>' % (book.remote_id, book.title) \\ for book in self.mention_books.all()", "(or w/e) users ''' return '\"%s\"<br>-- <a href=\"%s\">\"%s\"</a><br><br>%s' % ( self.quote, 
self.book.remote_id, self.book.title,", "default=None, null=True, blank=True, validators=[MinValueValidator(1), MaxValueValidator(5)] ) @property def pure_name(self): ''' clarify review names", "created date can't be this, because of receiving federated posts published_date = fields.DateTimeField(", "class GeneratedNote(Status): ''' these are app-generated messages about user activity ''' @property def", "of Activities ''' from django.utils import timezone from django.core.validators import MaxValueValidator, MinValueValidator from", "published_date = fields.DateTimeField( default=timezone.now, activitypub_field='published') deleted = models.BooleanField(default=False) deleted_date = models.DateTimeField(blank=True, null=True) favorites", "in the database. ''' user = models.ForeignKey('User', on_delete=models.PROTECT) book = models.ForeignKey('Book', on_delete=models.PROTECT) pages_read", "activity['cc'] = [] # \"pure\" serialization for non-bookwyrm instances if pure: activity['content'] =", "= models.ForeignKey( 'Edition', on_delete=models.PROTECT, null=True) related_user = models.ForeignKey( 'User', on_delete=models.PROTECT, null=True, related_name='related_user') related_status", "Activities ''' from django.utils import timezone from django.core.validators import MaxValueValidator, MinValueValidator from django.db", "== 'direct': activity['to'] = mentions activity['cc'] = [] # \"pure\" serialization for non-bookwyrm", "for b in self.mention_books.all() \\ if b.cover] if hasattr(self, 'book'): activity['attachment'].append( image_serializer(self.book.cover) )", "('user', 'status') class Boost(Status): ''' boost'ing a post ''' boosted_status = fields.ForeignKey( 'Status',", "= models.DateTimeField( blank=True, null=True) finish_date = models.DateTimeField( blank=True, null=True) def save(self, *args, **kwargs):", "class Status(OrderedCollectionPageMixin, BookWyrmModel): ''' any post, like a reply to a review, etc", "= models.ForeignKey( 'Status', 
on_delete=models.PROTECT, null=True) related_import = models.ForeignKey( 'ImportJob', on_delete=models.PROTECT, null=True) read =", "all replies to a status. idk if there's a better way to write", "'\"%s\"<br>-- <a href=\"%s\">\"%s\"</a><br><br>%s' % ( self.quote, self.book.remote_id, self.book.title, self.content, ) activity_serializer = activitypub.Quotation", "activitypub.Like def save(self, *args, **kwargs): ''' update user active time ''' self.user.last_active_date =", "on_delete=models.PROTECT, null=True, related_name='related_user') related_status = models.ForeignKey( 'Status', on_delete=models.PROTECT, null=True) related_import = models.ForeignKey( 'ImportJob',", "'public': activity['to'] = [public] activity['cc'] = [followers] + mentions elif self.privacy == 'unlisted':", "list: followers = self.user.__class__._meta.get_field('followers')\\ .field_to_activity(self.user.followers) if self.privacy == 'public': activity['to'] = [public] activity['cc']", "you've been tagged, liked, followed, etc ''' user = models.ForeignKey('User', on_delete=models.PROTECT) related_book =", "= fields.BooleanField(default=False) # the created date can't be this, because of receiving federated", "so it's just a property ''' return cls.objects.filter(reply_parent=status).select_subclasses() @property def status_type(self): ''' expose", "update user active time ''' if self.user.local: self.user.last_active_date = timezone.now() self.user.save() return super().save(*args,", ") reply_parent = fields.ForeignKey( 'self', null=True, on_delete=models.PROTECT, activitypub_field='inReplyTo', ) objects = InheritanceManager() activity_serializer", "ui using activity type ''' return self.activity_serializer.__name__ def to_replies(self, **kwargs): ''' helper function", "'https://www.w3.org/ns/activitystreams#Public' mentions = [u.remote_id for u in self.mention_users.all()] # this is a link", "self.content + '<br><br>(comment on <a href=\"%s\">\"%s\"</a>)' % \\ (self.book.remote_id, 
self.book.title) activity_serializer = activitypub.Comment", "models.DateTimeField( blank=True, null=True) finish_date = models.DateTimeField( blank=True, null=True) def save(self, *args, **kwargs): '''", "= mentions if self.privacy == 'direct': activity['to'] = mentions activity['cc'] = [] #", "of status for the ui using activity type ''' return self.activity_serializer.__name__ def to_replies(self,", "database. ''' user = models.ForeignKey('User', on_delete=models.PROTECT) book = models.ForeignKey('Book', on_delete=models.PROTECT) pages_read = models.IntegerField(", "models.ForeignKey( 'User', on_delete=models.PROTECT, null=True, related_name='related_user') related_status = models.ForeignKey( 'Status', on_delete=models.PROTECT, null=True) related_import =", "= 'Article' class Favorite(ActivitypubMixin, BookWyrmModel): ''' fav'ing a post ''' user = fields.ForeignKey(", "blank=True, null=True) def save(self, *args, **kwargs): ''' update user active time ''' self.user.last_active_date", "elif self.privacy == 'unlisted': activity['to'] = [followers] activity['cc'] = [public] + mentions elif", "'status') class Boost(Status): ''' boost'ing a post ''' boosted_status = fields.ForeignKey( 'Status', on_delete=models.PROTECT,", "symmetrical=False, through='Favorite', through_fields=('status', 'user'), related_name='user_favorites' ) reply_parent = fields.ForeignKey( 'self', null=True, on_delete=models.PROTECT, activitypub_field='inReplyTo',", "**kwargs) NotificationType = models.TextChoices( 'NotificationType', 'FAVORITE REPLY MENTION TAG FOLLOW FOLLOW_REQUEST BOOST IMPORT')", "= models.DateTimeField(blank=True, null=True) favorites = models.ManyToManyField( 'User', symmetrical=False, through='Favorite', through_fields=('status', 'user'), related_name='user_favorites' )", "+ mentions elif self.privacy == 'followers': activity['to'] = [followers] activity['cc'] = mentions if", "= fields.TagField('Edition', related_name='mention_book') local = 
models.BooleanField(default=True) privacy = models.CharField( max_length=255, default='public', choices=PrivacyLevels.choices )", "self.privacy == 'public': activity['to'] = [public] activity['cc'] = [followers] + mentions elif self.privacy", "a review, etc ''' user = fields.ForeignKey( 'User', on_delete=models.PROTECT, activitypub_field='attributedTo') content = fields.TextField(blank=True,", "'FAVORITE REPLY MENTION TAG FOLLOW FOLLOW_REQUEST BOOST IMPORT') class Notification(BookWyrmModel): ''' you've been", "= fields.IntegerField( default=None, null=True, blank=True, validators=[MinValueValidator(1), MaxValueValidator(5)] ) @property def pure_name(self): ''' clarify", "timezone.now() self.user.save() return super().save(*args, **kwargs) class GeneratedNote(Status): ''' these are app-generated messages about", "= fields.ForeignKey( 'Status', on_delete=models.PROTECT, related_name='boosters', activitypub_field='object', ) activity_serializer = activitypub.Boost # This constraint", "''' user = models.ForeignKey('User', on_delete=models.PROTECT) related_book = models.ForeignKey( 'Edition', on_delete=models.PROTECT, null=True) related_user =", "= ('user', 'status') class Boost(Status): ''' boost'ing a post ''' boosted_status = fields.ForeignKey(", "('user', 'boosted_status') class ReadThrough(BookWyrmModel): ''' Store progress through a book in the database.", "mentions activity['cc'] = [] # \"pure\" serialization for non-bookwyrm instances if pure: activity['content']", "user = fields.ForeignKey( 'User', on_delete=models.PROTECT, activitypub_field='attributedTo') content = fields.TextField(blank=True, null=True) mention_users = fields.TagField('User',", "''' indicate the book in question for mastodon (or w/e) users ''' return", "''' if self.rating: return 'Review of \"%s\" (%d stars): %s' % ( self.book.title,", "ActivitypubMixin.to_activity(self) activity['replies'] = self.to_replies() # privacy controls public = 'https://www.w3.org/ns/activitystreams#Public' 
mentions = [u.remote_id", "activitypub.Quotation pure_type = 'Note' class Review(Status): ''' a book review ''' name =", "question for mastodon (or w/e) users ''' message = self.content books = ',", "'%s %s %s' % (self.user.display_name, message, books) activity_serializer = activitypub.GeneratedNote pure_type = 'Note'", ") return 'Review of \"%s\": %s' % ( self.book.title, self.name ) @property def", "status): ''' load all replies to a status. idk if there's a better", "''' return self.to_ordered_collection( self.replies(self), remote_id='%s/replies' % self.remote_id, **kwargs ) def to_activity(self, pure=False): '''", "pages_read = models.IntegerField( null=True, blank=True) start_date = models.DateTimeField( blank=True, null=True) finish_date = models.DateTimeField(", "(self.book.remote_id, self.book.title) activity_serializer = activitypub.Comment pure_type = 'Note' class Quotation(Status): ''' like a", ". import fields from .fields import image_serializer class Status(OrderedCollectionPageMixin, BookWyrmModel): ''' any post,", "import MaxValueValidator, MinValueValidator from django.db import models from model_utils.managers import InheritanceManager from bookwyrm", "MaxValueValidator, MinValueValidator from django.db import models from model_utils.managers import InheritanceManager from bookwyrm import", "for mastodon serialization ''' if self.rating: return 'Review of \"%s\" (%d stars): %s'", "pure_type = 'Note' class Review(Status): ''' a book review ''' name = fields.CharField(max_length=255,", "= models.TextChoices( 'NotificationType', 'FAVORITE REPLY MENTION TAG FOLLOW FOLLOW_REQUEST BOOST IMPORT') class Notification(BookWyrmModel):", "in self.mention_books.all() \\ if b.cover] if hasattr(self, 'book'): activity['attachment'].append( image_serializer(self.book.cover) ) return activity", "book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') @property def pure_content(self): ''' indicate the book", "AP 
serialized replies to a status ''' return self.to_ordered_collection( self.replies(self), remote_id='%s/replies' % self.remote_id,", "== 'unlisted': activity['to'] = [followers] activity['cc'] = [public] + mentions elif self.privacy ==", "book in question for mastodon (or w/e) users ''' return self.content + '<br><br>(<a", "self.mention_books.all() ) return '%s %s %s' % (self.user.display_name, message, books) activity_serializer = activitypub.GeneratedNote", "NotificationType = models.TextChoices( 'NotificationType', 'FAVORITE REPLY MENTION TAG FOLLOW FOLLOW_REQUEST BOOST IMPORT') class", "related_status = models.ForeignKey( 'Status', on_delete=models.PROTECT, null=True) related_import = models.ForeignKey( 'ImportJob', on_delete=models.PROTECT, null=True) read", "for mastodon (or w/e) users ''' return self.content + '<br><br>(comment on <a href=\"%s\">\"%s\"</a>)'", "# the created date can't be this, because of receiving federated posts published_date", "hasattr(self, 'book'): activity['attachment'].append( image_serializer(self.book.cover) ) return activity def save(self, *args, **kwargs): ''' update", "fields.TextField() book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') @property def pure_content(self): ''' indicate the", "can't fav things twice ''' unique_together = ('user', 'status') class Boost(Status): ''' boost'ing", "mention_users = fields.TagField('User', related_name='mention_user') mention_books = fields.TagField('Edition', related_name='mention_book') local = models.BooleanField(default=True) privacy =", "a review but without a rating and transient ''' book = fields.ForeignKey( 'Edition',", "href=\"%s\">\"%s\"</a>)' % \\ (self.book.remote_id, self.book.title) activity_serializer = activitypub.Comment pure_type = 'Note' class Quotation(Status):", "+ mentions elif self.privacy == 'unlisted': activity['to'] = [followers] activity['cc'] = [public] +", "a status. 
idk if there's a better way to write this so it's", "timezone.now() self.user.save() super().save(*args, **kwargs) class Meta: ''' can't fav things twice ''' unique_together", "@property def pure_name(self): ''' clarify review names for mastodon serialization ''' if self.rating:", "models.ForeignKey( 'Status', on_delete=models.PROTECT, null=True) related_import = models.ForeignKey( 'ImportJob', on_delete=models.PROTECT, null=True) read = models.BooleanField(default=False)", "= activitypub.Quotation pure_type = 'Note' class Review(Status): ''' a book review ''' name", "just a property ''' return cls.objects.filter(reply_parent=status).select_subclasses() @property def status_type(self): ''' expose the type", "self.content books = ', '.join( '<a href=\"%s\">\"%s\"</a>' % (book.remote_id, book.title) \\ for book", "serialization for non-bookwyrm instances if pure: activity['content'] = self.pure_content if 'name' in activity:", "'Note' class Review(Status): ''' a book review ''' name = fields.CharField(max_length=255, null=True) book", "mentions elif self.privacy == 'followers': activity['to'] = [followers] activity['cc'] = mentions if self.privacy", "from django.core.validators import MaxValueValidator, MinValueValidator from django.db import models from model_utils.managers import InheritanceManager", "boost'ing a post ''' boosted_status = fields.ForeignKey( 'Status', on_delete=models.PROTECT, related_name='boosters', activitypub_field='object', ) activity_serializer", "cross tables. 
# class Meta: # unique_together = ('user', 'boosted_status') class ReadThrough(BookWyrmModel): '''", "self.user.save() return super().save(*args, **kwargs) class GeneratedNote(Status): ''' these are app-generated messages about user", "the book in question for mastodon (or w/e) users ''' message = self.content", "privacy controls public = 'https://www.w3.org/ns/activitystreams#Public' mentions = [u.remote_id for u in self.mention_users.all()] #", "class Review(Status): ''' a book review ''' name = fields.CharField(max_length=255, null=True) book =", "% self.remote_id, **kwargs ) def to_activity(self, pure=False): ''' return tombstone if the status", "enum list for valid types ''' constraints = [ models.CheckConstraint( check=models.Q(notification_type__in=NotificationType.values), name=\"notification_type_valid\", )", "= activitypub.Comment pure_type = 'Note' class Quotation(Status): ''' like a review but without", "on_delete=models.PROTECT, activitypub_field='inReplyToBook') @property def pure_content(self): ''' indicate the book in question for mastodon", "= fields.CharField(max_length=255, null=True) book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') rating = fields.IntegerField( default=None,", "ReadThrough(BookWyrmModel): ''' Store progress through a book in the database. 
''' user =", "% (book.remote_id, book.title) \\ for book in self.mention_books.all() ) return '%s %s %s'", "= self.pure_content if 'name' in activity: activity['name'] = self.pure_name activity['type'] = self.pure_type activity['attachment']", "class Boost(Status): ''' boost'ing a post ''' boosted_status = fields.ForeignKey( 'Status', on_delete=models.PROTECT, related_name='boosters',", "activity['to'] = [public] activity['cc'] = [followers] + mentions elif self.privacy == 'unlisted': activity['to']", "'.join( '<a href=\"%s\">\"%s\"</a>' % (book.remote_id, book.title) \\ for book in self.mention_books.all() ) return", "image_serializer(b.cover) for b in self.mention_books.all() \\ if b.cover] if hasattr(self, 'book'): activity['attachment'].append( image_serializer(self.book.cover)", "through='Favorite', through_fields=('status', 'user'), related_name='user_favorites' ) reply_parent = fields.ForeignKey( 'self', null=True, on_delete=models.PROTECT, activitypub_field='inReplyTo', )", "return cls.objects.filter(reply_parent=status).select_subclasses() @property def status_type(self): ''' expose the type of status for the", "'unlisted': activity['to'] = [followers] activity['cc'] = [public] + mentions elif self.privacy == 'followers':", "'Status', on_delete=models.PROTECT, null=True) related_import = models.ForeignKey( 'ImportJob', on_delete=models.PROTECT, null=True) read = models.BooleanField(default=False) notification_type", "if hasattr(self, 'book'): activity['attachment'].append( image_serializer(self.book.cover) ) return activity def save(self, *args, **kwargs): '''", "%s' % ( self.book.title, self.rating, self.name ) return 'Review of \"%s\": %s' %", "fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') rating = fields.IntegerField( default=None, null=True, blank=True, validators=[MinValueValidator(1), MaxValueValidator(5)] )", "BOOST IMPORT') class Notification(BookWyrmModel): ''' you've been tagged, liked, followed, etc 
''' user", "any post, like a reply to a review, etc ''' user = fields.ForeignKey(", "self.content, ) activity_serializer = activitypub.Quotation pure_type = 'Note' class Review(Status): ''' a book", "from .base_model import BookWyrmModel, PrivacyLevels from . import fields from .fields import image_serializer", ".base_model import ActivitypubMixin, OrderedCollectionPageMixin from .base_model import BookWyrmModel, PrivacyLevels from . import fields", "app-generated messages about user activity ''' @property def pure_content(self): ''' indicate the book", "''' Store progress through a book in the database. ''' user = models.ForeignKey('User',", "null=True, blank=True) start_date = models.DateTimeField( blank=True, null=True) finish_date = models.DateTimeField( blank=True, null=True) def", "activity_serializer = activitypub.Boost # This constraint can't work as it would cross tables.", "user active time ''' self.user.last_active_date = timezone.now() self.user.save() super().save(*args, **kwargs) class Meta: '''", "user active time ''' if self.user.local: self.user.last_active_date = timezone.now() self.user.save() return super().save(*args, **kwargs)", "a rating and transient ''' book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') @property def", "[ image_serializer(b.cover) for b in self.mention_books.all() \\ if b.cover] if hasattr(self, 'book'): activity['attachment'].append(", "# this is a link to the followers list: followers = self.user.__class__._meta.get_field('followers')\\ .field_to_activity(self.user.followers)", "question for mastodon (or w/e) users ''' return '\"%s\"<br>-- <a href=\"%s\">\"%s\"</a><br><br>%s' % (", "= models.ForeignKey('Book', on_delete=models.PROTECT) pages_read = models.IntegerField( null=True, blank=True) start_date = models.DateTimeField( blank=True, null=True)", "these are app-generated messages about user activity ''' @property def pure_content(self): ''' indicate", "review but without 
a rating and transient ''' book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT,", "''' from django.utils import timezone from django.core.validators import MaxValueValidator, MinValueValidator from django.db import", "blank=True) start_date = models.DateTimeField( blank=True, null=True) finish_date = models.DateTimeField( blank=True, null=True) def save(self,", "deserialize_reverse_fields = [('attachments', 'attachment')] #----- replies collection activitypub ----# @classmethod def replies(cls, status):", "Quotation(Status): ''' like a review but without a rating and transient ''' quote", "MaxValueValidator(5)] ) @property def pure_name(self): ''' clarify review names for mastodon serialization '''", "related_name='boosters', activitypub_field='object', ) activity_serializer = activitypub.Boost # This constraint can't work as it", "a reply to a review, etc ''' user = fields.ForeignKey( 'User', on_delete=models.PROTECT, activitypub_field='attributedTo')", "book in question for mastodon (or w/e) users ''' return self.content + '<br><br>(comment", "self.privacy == 'unlisted': activity['to'] = [followers] activity['cc'] = [public] + mentions elif self.privacy", "on_delete=models.PROTECT) related_book = models.ForeignKey( 'Edition', on_delete=models.PROTECT, null=True) related_user = models.ForeignKey( 'User', on_delete=models.PROTECT, null=True,", "models.DateTimeField(blank=True, null=True) favorites = models.ManyToManyField( 'User', symmetrical=False, through='Favorite', through_fields=('status', 'user'), related_name='user_favorites' ) reply_parent", "(or w/e) users ''' return self.content + '<br><br>(<a href=\"%s\">\"%s\"</a>)' % \\ (self.book.remote_id, self.book.title)", "can't work as it would cross tables. 
# class Meta: # unique_together =", "''' if self.deleted: return activitypub.Tombstone( id=self.remote_id, url=self.remote_id, deleted=self.deleted_date.isoformat(), published=self.deleted_date.isoformat() ).serialize() activity = ActivitypubMixin.to_activity(self)", "unique_together = ('user', 'boosted_status') class ReadThrough(BookWyrmModel): ''' Store progress through a book in", ") sensitive = fields.BooleanField(default=False) # the created date can't be this, because of", "the book in question for mastodon (or w/e) users ''' return self.content +", "= models.ForeignKey( 'ImportJob', on_delete=models.PROTECT, null=True) read = models.BooleanField(default=False) notification_type = models.CharField( max_length=255, choices=NotificationType.choices)", "= fields.DateTimeField( default=timezone.now, activitypub_field='published') deleted = models.BooleanField(default=False) deleted_date = models.DateTimeField(blank=True, null=True) favorites =", "class Meta: ''' checks if notifcation is in enum list for valid types", "+ '<br><br>(comment on <a href=\"%s\">\"%s\"</a>)' % \\ (self.book.remote_id, self.book.title) activity_serializer = activitypub.Comment pure_type", "== 'public': activity['to'] = [public] activity['cc'] = [followers] + mentions elif self.privacy ==", "'name' in activity: activity['name'] = self.pure_name activity['type'] = self.pure_type activity['attachment'] = [ image_serializer(b.cover)", "if self.user.local: self.user.last_active_date = timezone.now() self.user.save() return super().save(*args, **kwargs) class GeneratedNote(Status): ''' these", "''' @property def pure_content(self): ''' indicate the book in question for mastodon (or", "**kwargs ) def to_activity(self, pure=False): ''' return tombstone if the status is deleted", "class Quotation(Status): ''' like a review but without a rating and transient '''", "read = models.BooleanField(default=False) notification_type = models.CharField( max_length=255, choices=NotificationType.choices) 
class Meta: ''' checks if", "activity['content'] = self.pure_content if 'name' in activity: activity['name'] = self.pure_name activity['type'] = self.pure_type", "like a review but without a rating and transient ''' book = fields.ForeignKey(", "'NotificationType', 'FAVORITE REPLY MENTION TAG FOLLOW FOLLOW_REQUEST BOOST IMPORT') class Notification(BookWyrmModel): ''' you've", "= models.CharField( max_length=255, default='public', choices=PrivacyLevels.choices ) sensitive = fields.BooleanField(default=False) # the created date", "date can't be this, because of receiving federated posts published_date = fields.DateTimeField( default=timezone.now,", "activity type ''' return self.activity_serializer.__name__ def to_replies(self, **kwargs): ''' helper function for loading", "def to_replies(self, **kwargs): ''' helper function for loading AP serialized replies to a", "if b.cover] if hasattr(self, 'book'): activity['attachment'].append( image_serializer(self.book.cover) ) return activity def save(self, *args,", "'Status', on_delete=models.PROTECT, activitypub_field='object') activity_serializer = activitypub.Like def save(self, *args, **kwargs): ''' update user", "w/e) users ''' return self.content + '<br><br>(<a href=\"%s\">\"%s\"</a>)' % \\ (self.book.remote_id, self.book.title) activity_serializer", "''' update user active time ''' self.user.last_active_date = timezone.now() self.user.save() super().save(*args, **kwargs) class", "user active time ''' self.user.last_active_date = timezone.now() self.user.save() super().save(*args, **kwargs) NotificationType = models.TextChoices(", "'Status', on_delete=models.PROTECT, related_name='boosters', activitypub_field='object', ) activity_serializer = activitypub.Boost # This constraint can't work", "a property ''' return cls.objects.filter(reply_parent=status).select_subclasses() @property def status_type(self): ''' expose the type of", "mention_books = fields.TagField('Edition', related_name='mention_book') local = 
models.BooleanField(default=True) privacy = models.CharField( max_length=255, default='public', choices=PrivacyLevels.choices", "twice ''' unique_together = ('user', 'status') class Boost(Status): ''' boost'ing a post '''", "= fields.TextField(blank=True, null=True) mention_users = fields.TagField('User', related_name='mention_user') mention_books = fields.TagField('Edition', related_name='mention_book') local =", "pure_name(self): ''' clarify review names for mastodon serialization ''' if self.rating: return 'Review", "import models from model_utils.managers import InheritanceManager from bookwyrm import activitypub from .base_model import", ") objects = InheritanceManager() activity_serializer = activitypub.Note serialize_reverse_fields = [('attachments', 'attachment')] deserialize_reverse_fields =", "replies collection activitypub ----# @classmethod def replies(cls, status): ''' load all replies to", "the database. ''' user = models.ForeignKey('User', on_delete=models.PROTECT) book = models.ForeignKey('Book', on_delete=models.PROTECT) pages_read =", "activitypub from .base_model import ActivitypubMixin, OrderedCollectionPageMixin from .base_model import BookWyrmModel, PrivacyLevels from .", "activitypub.Tombstone( id=self.remote_id, url=self.remote_id, deleted=self.deleted_date.isoformat(), published=self.deleted_date.isoformat() ).serialize() activity = ActivitypubMixin.to_activity(self) activity['replies'] = self.to_replies() #", "for storing different kinds of Activities ''' from django.utils import timezone from django.core.validators", "pure_type = 'Note' class Quotation(Status): ''' like a review but without a rating", "= activitypub.Note serialize_reverse_fields = [('attachments', 'attachment')] deserialize_reverse_fields = [('attachments', 'attachment')] #----- replies collection", "if there's a better way to write this so it's just a property", "''' return self.content + '<br><br>(<a href=\"%s\">\"%s\"</a>)' % \\ (self.book.remote_id, self.book.title) 
activity_serializer = activitypub.Review", "would cross tables. # class Meta: # unique_together = ('user', 'boosted_status') class ReadThrough(BookWyrmModel):", "post ''' user = fields.ForeignKey( 'User', on_delete=models.PROTECT, activitypub_field='actor') status = fields.ForeignKey( 'Status', on_delete=models.PROTECT,", "def replies(cls, status): ''' load all replies to a status. idk if there's", "ActivitypubMixin, OrderedCollectionPageMixin from .base_model import BookWyrmModel, PrivacyLevels from . import fields from .fields", "pure_content(self): ''' indicate the book in question for mastodon (or w/e) users '''", "self.to_ordered_collection( self.replies(self), remote_id='%s/replies' % self.remote_id, **kwargs ) def to_activity(self, pure=False): ''' return tombstone", "django.db import models from model_utils.managers import InheritanceManager from bookwyrm import activitypub from .base_model", "return activity def save(self, *args, **kwargs): ''' update user active time ''' if", "in activity: activity['name'] = self.pure_name activity['type'] = self.pure_type activity['attachment'] = [ image_serializer(b.cover) for", "sensitive = fields.BooleanField(default=False) # the created date can't be this, because of receiving", "self.privacy == 'followers': activity['to'] = [followers] activity['cc'] = mentions if self.privacy == 'direct':", "activitypub_field='inReplyToBook') @property def pure_content(self): ''' indicate the book in question for mastodon (or", "# privacy controls public = 'https://www.w3.org/ns/activitystreams#Public' mentions = [u.remote_id for u in self.mention_users.all()]", "post, like a reply to a review, etc ''' user = fields.ForeignKey( 'User',", "def save(self, *args, **kwargs): ''' update user active time ''' if self.user.local: self.user.last_active_date", "serialize_reverse_fields = [('attachments', 'attachment')] deserialize_reverse_fields = [('attachments', 'attachment')] #----- replies collection activitypub ----#", "activitypub 
----# @classmethod def replies(cls, status): ''' load all replies to a status.", "self.name ) @property def pure_content(self): ''' indicate the book in question for mastodon", "'<br><br>(<a href=\"%s\">\"%s\"</a>)' % \\ (self.book.remote_id, self.book.title) activity_serializer = activitypub.Review pure_type = 'Article' class", "''' return self.activity_serializer.__name__ def to_replies(self, **kwargs): ''' helper function for loading AP serialized", "liked, followed, etc ''' user = models.ForeignKey('User', on_delete=models.PROTECT) related_book = models.ForeignKey( 'Edition', on_delete=models.PROTECT,", "it's just a property ''' return cls.objects.filter(reply_parent=status).select_subclasses() @property def status_type(self): ''' expose the", "= models.DateTimeField( blank=True, null=True) def save(self, *args, **kwargs): ''' update user active time", "a post ''' boosted_status = fields.ForeignKey( 'Status', on_delete=models.PROTECT, related_name='boosters', activitypub_field='object', ) activity_serializer =", "user activity ''' @property def pure_content(self): ''' indicate the book in question for", "django.core.validators import MaxValueValidator, MinValueValidator from django.db import models from model_utils.managers import InheritanceManager from", "activity['cc'] = [public] + mentions elif self.privacy == 'followers': activity['to'] = [followers] activity['cc']", "= ('user', 'boosted_status') class ReadThrough(BookWyrmModel): ''' Store progress through a book in the", "= models.ForeignKey('User', on_delete=models.PROTECT) related_book = models.ForeignKey( 'Edition', on_delete=models.PROTECT, null=True) related_user = models.ForeignKey( 'User',", "models.CharField( max_length=255, choices=NotificationType.choices) class Meta: ''' checks if notifcation is in enum list", "<a href=\"%s\">\"%s\"</a>)' % \\ (self.book.remote_id, self.book.title) activity_serializer = activitypub.Comment pure_type = 'Note' class", "activitypub.Comment pure_type = 'Note' class 
Quotation(Status): ''' like a review but without a", "mastodon serialization ''' if self.rating: return 'Review of \"%s\" (%d stars): %s' %", "class Meta: # unique_together = ('user', 'boosted_status') class ReadThrough(BookWyrmModel): ''' Store progress through", "boosted_status = fields.ForeignKey( 'Status', on_delete=models.PROTECT, related_name='boosters', activitypub_field='object', ) activity_serializer = activitypub.Boost # This", "serialization ''' if self.rating: return 'Review of \"%s\" (%d stars): %s' % (", "tombstone if the status is deleted ''' if self.deleted: return activitypub.Tombstone( id=self.remote_id, url=self.remote_id,", "mentions if self.privacy == 'direct': activity['to'] = mentions activity['cc'] = [] # \"pure\"", "'Article' class Favorite(ActivitypubMixin, BookWyrmModel): ''' fav'ing a post ''' user = fields.ForeignKey( 'User',", "Favorite(ActivitypubMixin, BookWyrmModel): ''' fav'ing a post ''' user = fields.ForeignKey( 'User', on_delete=models.PROTECT, activitypub_field='actor')", "''' can't fav things twice ''' unique_together = ('user', 'status') class Boost(Status): '''", "**kwargs): ''' update user active time ''' if self.user.local: self.user.last_active_date = timezone.now() self.user.save()", "'Review of \"%s\": %s' % ( self.book.title, self.name ) @property def pure_content(self): '''", "like a review but without a rating and transient ''' quote = fields.TextField()", "to_activity(self, pure=False): ''' return tombstone if the status is deleted ''' if self.deleted:", "%s %s' % (self.user.display_name, message, books) activity_serializer = activitypub.GeneratedNote pure_type = 'Note' class", "= models.ForeignKey('User', on_delete=models.PROTECT) book = models.ForeignKey('Book', on_delete=models.PROTECT) pages_read = models.IntegerField( null=True, blank=True) start_date", "= InheritanceManager() activity_serializer = activitypub.Note serialize_reverse_fields = [('attachments', 'attachment')] deserialize_reverse_fields = 
[('attachments', 'attachment')]", "'User', on_delete=models.PROTECT, activitypub_field='actor') status = fields.ForeignKey( 'Status', on_delete=models.PROTECT, activitypub_field='object') activity_serializer = activitypub.Like def", "followers list: followers = self.user.__class__._meta.get_field('followers')\\ .field_to_activity(self.user.followers) if self.privacy == 'public': activity['to'] = [public]", "if self.deleted: return activitypub.Tombstone( id=self.remote_id, url=self.remote_id, deleted=self.deleted_date.isoformat(), published=self.deleted_date.isoformat() ).serialize() activity = ActivitypubMixin.to_activity(self) activity['replies']", "message = self.content books = ', '.join( '<a href=\"%s\">\"%s\"</a>' % (book.remote_id, book.title) \\", "a rating and transient ''' quote = fields.TextField() book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT,", "self.book.title, self.rating, self.name ) return 'Review of \"%s\": %s' % ( self.book.title, self.name", "models.ForeignKey( 'Edition', on_delete=models.PROTECT, null=True) related_user = models.ForeignKey( 'User', on_delete=models.PROTECT, null=True, related_name='related_user') related_status =", "if self.privacy == 'direct': activity['to'] = mentions activity['cc'] = [] # \"pure\" serialization", "self.user.last_active_date = timezone.now() self.user.save() super().save(*args, **kwargs) class Meta: ''' can't fav things twice", "favorites = models.ManyToManyField( 'User', symmetrical=False, through='Favorite', through_fields=('status', 'user'), related_name='user_favorites' ) reply_parent = fields.ForeignKey(", "users ''' return '\"%s\"<br>-- <a href=\"%s\">\"%s\"</a><br><br>%s' % ( self.quote, self.book.remote_id, self.book.title, self.content, )", "link to the followers list: followers = self.user.__class__._meta.get_field('followers')\\ .field_to_activity(self.user.followers) if self.privacy == 'public':", "the followers list: followers = self.user.__class__._meta.get_field('followers')\\ 
.field_to_activity(self.user.followers) if self.privacy == 'public': activity['to'] =", "= self.user.__class__._meta.get_field('followers')\\ .field_to_activity(self.user.followers) if self.privacy == 'public': activity['to'] = [public] activity['cc'] = [followers]", "for loading AP serialized replies to a status ''' return self.to_ordered_collection( self.replies(self), remote_id='%s/replies'", "remote_id='%s/replies' % self.remote_id, **kwargs ) def to_activity(self, pure=False): ''' return tombstone if the", "different kinds of Activities ''' from django.utils import timezone from django.core.validators import MaxValueValidator,", "it would cross tables. # class Meta: # unique_together = ('user', 'boosted_status') class", "def save(self, *args, **kwargs): ''' update user active time ''' self.user.last_active_date = timezone.now()", "from .fields import image_serializer class Status(OrderedCollectionPageMixin, BookWyrmModel): ''' any post, like a reply", "models.TextChoices( 'NotificationType', 'FAVORITE REPLY MENTION TAG FOLLOW FOLLOW_REQUEST BOOST IMPORT') class Notification(BookWyrmModel): '''", "if notifcation is in enum list for valid types ''' constraints = [", "%s' % ( self.book.title, self.name ) @property def pure_content(self): ''' indicate the book", "(or w/e) users ''' message = self.content books = ', '.join( '<a href=\"%s\">\"%s\"</a>'", "names for mastodon serialization ''' if self.rating: return 'Review of \"%s\" (%d stars):", ") return '%s %s %s' % (self.user.display_name, message, books) activity_serializer = activitypub.GeneratedNote pure_type", "on_delete=models.PROTECT) pages_read = models.IntegerField( null=True, blank=True) start_date = models.DateTimeField( blank=True, null=True) finish_date =", "return '\"%s\"<br>-- <a href=\"%s\">\"%s\"</a><br><br>%s' % ( self.quote, self.book.remote_id, self.book.title, self.content, ) activity_serializer =", "'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') rating = 
fields.IntegerField( default=None, null=True, blank=True, validators=[MinValueValidator(1), MaxValueValidator(5)] ) @property", "activity_serializer = activitypub.Like def save(self, *args, **kwargs): ''' update user active time '''", "bookwyrm import activitypub from .base_model import ActivitypubMixin, OrderedCollectionPageMixin from .base_model import BookWyrmModel, PrivacyLevels", "= [public] activity['cc'] = [followers] + mentions elif self.privacy == 'unlisted': activity['to'] =", "are app-generated messages about user activity ''' @property def pure_content(self): ''' indicate the", "This constraint can't work as it would cross tables. # class Meta: #", "image_serializer class Status(OrderedCollectionPageMixin, BookWyrmModel): ''' any post, like a reply to a review,", "BookWyrmModel): ''' any post, like a reply to a review, etc ''' user", "status ''' return self.to_ordered_collection( self.replies(self), remote_id='%s/replies' % self.remote_id, **kwargs ) def to_activity(self, pure=False):", "self.content + '<br><br>(<a href=\"%s\">\"%s\"</a>)' % \\ (self.book.remote_id, self.book.title) activity_serializer = activitypub.Review pure_type =", "+ '<br><br>(<a href=\"%s\">\"%s\"</a>)' % \\ (self.book.remote_id, self.book.title) activity_serializer = activitypub.Review pure_type = 'Article'", "time ''' if self.user.local: self.user.last_active_date = timezone.now() self.user.save() return super().save(*args, **kwargs) class GeneratedNote(Status):", "related_name='user_favorites' ) reply_parent = fields.ForeignKey( 'self', null=True, on_delete=models.PROTECT, activitypub_field='inReplyTo', ) objects = InheritanceManager()", "% ( self.quote, self.book.remote_id, self.book.title, self.content, ) activity_serializer = activitypub.Quotation pure_type = 'Note'", "update user active time ''' self.user.last_active_date = timezone.now() self.user.save() super().save(*args, **kwargs) NotificationType =", "to_replies(self, **kwargs): ''' helper function for loading AP 
serialized replies to a status", "default=timezone.now, activitypub_field='published') deleted = models.BooleanField(default=False) deleted_date = models.DateTimeField(blank=True, null=True) favorites = models.ManyToManyField( 'User',", "on <a href=\"%s\">\"%s\"</a>)' % \\ (self.book.remote_id, self.book.title) activity_serializer = activitypub.Comment pure_type = 'Note'", "activity['attachment'] = [ image_serializer(b.cover) for b in self.mention_books.all() \\ if b.cover] if hasattr(self,", "choices=PrivacyLevels.choices ) sensitive = fields.BooleanField(default=False) # the created date can't be this, because", "this so it's just a property ''' return cls.objects.filter(reply_parent=status).select_subclasses() @property def status_type(self): '''", "status_type(self): ''' expose the type of status for the ui using activity type", "\"%s\": %s' % ( self.book.title, self.name ) @property def pure_content(self): ''' indicate the", "and transient ''' quote = fields.TextField() book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') @property", "FOLLOW FOLLOW_REQUEST BOOST IMPORT') class Notification(BookWyrmModel): ''' you've been tagged, liked, followed, etc", "activity = ActivitypubMixin.to_activity(self) activity['replies'] = self.to_replies() # privacy controls public = 'https://www.w3.org/ns/activitystreams#Public' mentions", "type ''' return self.activity_serializer.__name__ def to_replies(self, **kwargs): ''' helper function for loading AP", "= self.to_replies() # privacy controls public = 'https://www.w3.org/ns/activitystreams#Public' mentions = [u.remote_id for u", "quote = fields.TextField() book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') @property def pure_content(self): '''", "return super().save(*args, **kwargs) class GeneratedNote(Status): ''' these are app-generated messages about user activity", "function for loading AP serialized replies to a status ''' return 
self.to_ordered_collection( self.replies(self),", "load all replies to a status. idk if there's a better way to", "book = models.ForeignKey('Book', on_delete=models.PROTECT) pages_read = models.IntegerField( null=True, blank=True) start_date = models.DateTimeField( blank=True,", "because of receiving federated posts published_date = fields.DateTimeField( default=timezone.now, activitypub_field='published') deleted = models.BooleanField(default=False)", "super().save(*args, **kwargs) class Meta: ''' can't fav things twice ''' unique_together = ('user',", "FOLLOW_REQUEST BOOST IMPORT') class Notification(BookWyrmModel): ''' you've been tagged, liked, followed, etc '''", "[('attachments', 'attachment')] #----- replies collection activitypub ----# @classmethod def replies(cls, status): ''' load", "to a review, etc ''' user = fields.ForeignKey( 'User', on_delete=models.PROTECT, activitypub_field='attributedTo') content =", "super().save(*args, **kwargs) NotificationType = models.TextChoices( 'NotificationType', 'FAVORITE REPLY MENTION TAG FOLLOW FOLLOW_REQUEST BOOST", "return 'Review of \"%s\" (%d stars): %s' % ( self.book.title, self.rating, self.name )", "activitypub_field='inReplyToBook') rating = fields.IntegerField( default=None, null=True, blank=True, validators=[MinValueValidator(1), MaxValueValidator(5)] ) @property def pure_name(self):", "the status is deleted ''' if self.deleted: return activitypub.Tombstone( id=self.remote_id, url=self.remote_id, deleted=self.deleted_date.isoformat(), published=self.deleted_date.isoformat()", "return activitypub.Tombstone( id=self.remote_id, url=self.remote_id, deleted=self.deleted_date.isoformat(), published=self.deleted_date.isoformat() ).serialize() activity = ActivitypubMixin.to_activity(self) activity['replies'] = self.to_replies()", "w/e) users ''' return '\"%s\"<br>-- <a href=\"%s\">\"%s\"</a><br><br>%s' % ( self.quote, self.book.remote_id, self.book.title, self.content,", "Meta: # unique_together = ('user', 
'boosted_status') class ReadThrough(BookWyrmModel): ''' Store progress through a", "url=self.remote_id, deleted=self.deleted_date.isoformat(), published=self.deleted_date.isoformat() ).serialize() activity = ActivitypubMixin.to_activity(self) activity['replies'] = self.to_replies() # privacy controls", "activity['name'] = self.pure_name activity['type'] = self.pure_type activity['attachment'] = [ image_serializer(b.cover) for b in", "activitypub_field='attributedTo') content = fields.TextField(blank=True, null=True) mention_users = fields.TagField('User', related_name='mention_user') mention_books = fields.TagField('Edition', related_name='mention_book')", "activity: activity['name'] = self.pure_name activity['type'] = self.pure_type activity['attachment'] = [ image_serializer(b.cover) for b", "in question for mastodon (or w/e) users ''' return self.content + '<br><br>(<a href=\"%s\">\"%s\"</a>)'", "[u.remote_id for u in self.mention_users.all()] # this is a link to the followers", "# \"pure\" serialization for non-bookwyrm instances if pure: activity['content'] = self.pure_content if 'name'", "<a href=\"%s\">\"%s\"</a><br><br>%s' % ( self.quote, self.book.remote_id, self.book.title, self.content, ) activity_serializer = activitypub.Quotation pure_type", "status = fields.ForeignKey( 'Status', on_delete=models.PROTECT, activitypub_field='object') activity_serializer = activitypub.Like def save(self, *args, **kwargs):", "= self.content books = ', '.join( '<a href=\"%s\">\"%s\"</a>' % (book.remote_id, book.title) \\ for", "fields.ForeignKey( 'self', null=True, on_delete=models.PROTECT, activitypub_field='inReplyTo', ) objects = InheritanceManager() activity_serializer = activitypub.Note serialize_reverse_fields", "'User', on_delete=models.PROTECT, activitypub_field='attributedTo') content = fields.TextField(blank=True, null=True) mention_users = fields.TagField('User', related_name='mention_user') mention_books =", "through_fields=('status', 'user'), 
related_name='user_favorites' ) reply_parent = fields.ForeignKey( 'self', null=True, on_delete=models.PROTECT, activitypub_field='inReplyTo', ) objects", "OrderedCollectionPageMixin from .base_model import BookWyrmModel, PrivacyLevels from . import fields from .fields import", "book review ''' name = fields.CharField(max_length=255, null=True) book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook')", "''' a book review ''' name = fields.CharField(max_length=255, null=True) book = fields.ForeignKey( 'Edition',", "= self.pure_name activity['type'] = self.pure_type activity['attachment'] = [ image_serializer(b.cover) for b in self.mention_books.all()", "----# @classmethod def replies(cls, status): ''' load all replies to a status. idk", "InheritanceManager() activity_serializer = activitypub.Note serialize_reverse_fields = [('attachments', 'attachment')] deserialize_reverse_fields = [('attachments', 'attachment')] #-----", "''' update user active time ''' self.user.last_active_date = timezone.now() self.user.save() super().save(*args, **kwargs) NotificationType", "**kwargs): ''' update user active time ''' self.user.last_active_date = timezone.now() self.user.save() super().save(*args, **kwargs)", "notifcation is in enum list for valid types ''' constraints = [ models.CheckConstraint(", "= [('attachments', 'attachment')] deserialize_reverse_fields = [('attachments', 'attachment')] #----- replies collection activitypub ----# @classmethod", "(book.remote_id, book.title) \\ for book in self.mention_books.all() ) return '%s %s %s' %", "rating and transient ''' book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') @property def pure_content(self):", "idk if there's a better way to write this so it's just a", "self.privacy == 'direct': activity['to'] = mentions activity['cc'] = [] # \"pure\" serialization for", "null=True) book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, 
activitypub_field='inReplyToBook') rating = fields.IntegerField( default=None, null=True, blank=True,", "a status ''' return self.to_ordered_collection( self.replies(self), remote_id='%s/replies' % self.remote_id, **kwargs ) def to_activity(self,", "= models.BooleanField(default=True) privacy = models.CharField( max_length=255, default='public', choices=PrivacyLevels.choices ) sensitive = fields.BooleanField(default=False) #", "for the ui using activity type ''' return self.activity_serializer.__name__ def to_replies(self, **kwargs): '''", "in self.mention_users.all()] # this is a link to the followers list: followers =", "is a link to the followers list: followers = self.user.__class__._meta.get_field('followers')\\ .field_to_activity(self.user.followers) if self.privacy", "fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') @property def pure_content(self): ''' indicate the book in question", "'User', on_delete=models.PROTECT, null=True, related_name='related_user') related_status = models.ForeignKey( 'Status', on_delete=models.PROTECT, null=True) related_import = models.ForeignKey(", "[('attachments', 'attachment')] deserialize_reverse_fields = [('attachments', 'attachment')] #----- replies collection activitypub ----# @classmethod def", "''' like a review but without a rating and transient ''' book =", "self.book.title, self.content, ) activity_serializer = activitypub.Quotation pure_type = 'Note' class Review(Status): ''' a", ") @property def pure_content(self): ''' indicate the book in question for mastodon (or", "validators=[MinValueValidator(1), MaxValueValidator(5)] ) @property def pure_name(self): ''' clarify review names for mastodon serialization", "review names for mastodon serialization ''' if self.rating: return 'Review of \"%s\" (%d", "# unique_together = ('user', 'boosted_status') class ReadThrough(BookWyrmModel): ''' Store progress through a book", "fields.CharField(max_length=255, null=True) book = 
fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') rating = fields.IntegerField( default=None, null=True,", "users ''' message = self.content books = ', '.join( '<a href=\"%s\">\"%s\"</a>' % (book.remote_id,", "href=\"%s\">\"%s\"</a><br><br>%s' % ( self.quote, self.book.remote_id, self.book.title, self.content, ) activity_serializer = activitypub.Quotation pure_type =", "import BookWyrmModel, PrivacyLevels from . import fields from .fields import image_serializer class Status(OrderedCollectionPageMixin,", "= [] # \"pure\" serialization for non-bookwyrm instances if pure: activity['content'] = self.pure_content", "activitypub_field='object', ) activity_serializer = activitypub.Boost # This constraint can't work as it would", "'boosted_status') class ReadThrough(BookWyrmModel): ''' Store progress through a book in the database. '''", "'attachment')] #----- replies collection activitypub ----# @classmethod def replies(cls, status): ''' load all", "a link to the followers list: followers = self.user.__class__._meta.get_field('followers')\\ .field_to_activity(self.user.followers) if self.privacy ==", "[] # \"pure\" serialization for non-bookwyrm instances if pure: activity['content'] = self.pure_content if", "self.pure_content if 'name' in activity: activity['name'] = self.pure_name activity['type'] = self.pure_type activity['attachment'] =", "activity_serializer = activitypub.Quotation pure_type = 'Note' class Review(Status): ''' a book review '''", "fields.ForeignKey( 'User', on_delete=models.PROTECT, activitypub_field='actor') status = fields.ForeignKey( 'Status', on_delete=models.PROTECT, activitypub_field='object') activity_serializer = activitypub.Like", "for mastodon (or w/e) users ''' return '\"%s\"<br>-- <a href=\"%s\">\"%s\"</a><br><br>%s' % ( self.quote,", "return self.content + '<br><br>(<a href=\"%s\">\"%s\"</a>)' % \\ (self.book.remote_id, self.book.title) activity_serializer = activitypub.Review pure_type", 
"question for mastodon (or w/e) users ''' return self.content + '<br><br>(<a href=\"%s\">\"%s\"</a>)' %", "class Comment(Status): ''' like a review but without a rating and transient '''", "better way to write this so it's just a property ''' return cls.objects.filter(reply_parent=status).select_subclasses()", "href=\"%s\">\"%s\"</a>)' % \\ (self.book.remote_id, self.book.title) activity_serializer = activitypub.Review pure_type = 'Article' class Favorite(ActivitypubMixin,", "'Review of \"%s\" (%d stars): %s' % ( self.book.title, self.rating, self.name ) return", "return self.content + '<br><br>(comment on <a href=\"%s\">\"%s\"</a>)' % \\ (self.book.remote_id, self.book.title) activity_serializer =", "= ActivitypubMixin.to_activity(self) activity['replies'] = self.to_replies() # privacy controls public = 'https://www.w3.org/ns/activitystreams#Public' mentions =", "without a rating and transient ''' book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') @property", "about user activity ''' @property def pure_content(self): ''' indicate the book in question", ") return activity def save(self, *args, **kwargs): ''' update user active time '''", "REPLY MENTION TAG FOLLOW FOLLOW_REQUEST BOOST IMPORT') class Notification(BookWyrmModel): ''' you've been tagged,", "kinds of Activities ''' from django.utils import timezone from django.core.validators import MaxValueValidator, MinValueValidator", "%s' % (self.user.display_name, message, books) activity_serializer = activitypub.GeneratedNote pure_type = 'Note' class Comment(Status):", "self.deleted: return activitypub.Tombstone( id=self.remote_id, url=self.remote_id, deleted=self.deleted_date.isoformat(), published=self.deleted_date.isoformat() ).serialize() activity = ActivitypubMixin.to_activity(self) activity['replies'] =", "''' models for storing different kinds of Activities ''' from django.utils import timezone", "work as it would cross tables. 
# class Meta: # unique_together = ('user',", "= fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') rating = fields.IntegerField( default=None, null=True, blank=True, validators=[MinValueValidator(1), MaxValueValidator(5)]", "TAG FOLLOW FOLLOW_REQUEST BOOST IMPORT') class Notification(BookWyrmModel): ''' you've been tagged, liked, followed,", "self.book.title, self.name ) @property def pure_content(self): ''' indicate the book in question for", "from .base_model import ActivitypubMixin, OrderedCollectionPageMixin from .base_model import BookWyrmModel, PrivacyLevels from . import", "self.rating: return 'Review of \"%s\" (%d stars): %s' % ( self.book.title, self.rating, self.name", "book in question for mastodon (or w/e) users ''' return '\"%s\"<br>-- <a href=\"%s\">\"%s\"</a><br><br>%s'", "fields.TextField(blank=True, null=True) mention_users = fields.TagField('User', related_name='mention_user') mention_books = fields.TagField('Edition', related_name='mention_book') local = models.BooleanField(default=True)", "= [followers] + mentions elif self.privacy == 'unlisted': activity['to'] = [followers] activity['cc'] =", "( self.book.title, self.name ) @property def pure_content(self): ''' indicate the book in question", "(self.user.display_name, message, books) activity_serializer = activitypub.GeneratedNote pure_type = 'Note' class Comment(Status): ''' like", "federated posts published_date = fields.DateTimeField( default=timezone.now, activitypub_field='published') deleted = models.BooleanField(default=False) deleted_date = models.DateTimeField(blank=True,", "'user'), related_name='user_favorites' ) reply_parent = fields.ForeignKey( 'self', null=True, on_delete=models.PROTECT, activitypub_field='inReplyTo', ) objects =", "question for mastodon (or w/e) users ''' return self.content + '<br><br>(comment on <a", "unique_together = ('user', 'status') class Boost(Status): ''' boost'ing a post ''' boosted_status =", "reply_parent = 
fields.ForeignKey( 'self', null=True, on_delete=models.PROTECT, activitypub_field='inReplyTo', ) objects = InheritanceManager() activity_serializer =", "= fields.ForeignKey( 'Status', on_delete=models.PROTECT, activitypub_field='object') activity_serializer = activitypub.Like def save(self, *args, **kwargs): '''", "fields.ForeignKey( 'User', on_delete=models.PROTECT, activitypub_field='attributedTo') content = fields.TextField(blank=True, null=True) mention_users = fields.TagField('User', related_name='mention_user') mention_books", "default='public', choices=PrivacyLevels.choices ) sensitive = fields.BooleanField(default=False) # the created date can't be this,", "''' checks if notifcation is in enum list for valid types ''' constraints", "self.user.__class__._meta.get_field('followers')\\ .field_to_activity(self.user.followers) if self.privacy == 'public': activity['to'] = [public] activity['cc'] = [followers] +", "''' you've been tagged, liked, followed, etc ''' user = models.ForeignKey('User', on_delete=models.PROTECT) related_book", "name = fields.CharField(max_length=255, null=True) book = fields.ForeignKey( 'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook') rating = fields.IntegerField(", "MENTION TAG FOLLOW FOLLOW_REQUEST BOOST IMPORT') class Notification(BookWyrmModel): ''' you've been tagged, liked,", "activity_serializer = activitypub.GeneratedNote pure_type = 'Note' class Comment(Status): ''' like a review but", "models from model_utils.managers import InheritanceManager from bookwyrm import activitypub from .base_model import ActivitypubMixin,", "there's a better way to write this so it's just a property '''", "tables. 
# class Meta: # unique_together = ('user', 'boosted_status') class ReadThrough(BookWyrmModel): ''' Store", "**kwargs): ''' helper function for loading AP serialized replies to a status '''", "activitypub_field='object') activity_serializer = activitypub.Like def save(self, *args, **kwargs): ''' update user active time", "fields from .fields import image_serializer class Status(OrderedCollectionPageMixin, BookWyrmModel): ''' any post, like a", "self.name ) return 'Review of \"%s\": %s' % ( self.book.title, self.name ) @property", "'book'): activity['attachment'].append( image_serializer(self.book.cover) ) return activity def save(self, *args, **kwargs): ''' update user", "fields.BooleanField(default=False) # the created date can't be this, because of receiving federated posts", "activity['cc'] = mentions if self.privacy == 'direct': activity['to'] = mentions activity['cc'] = []", "through a book in the database. ''' user = models.ForeignKey('User', on_delete=models.PROTECT) book =", "null=True) read = models.BooleanField(default=False) notification_type = models.CharField( max_length=255, choices=NotificationType.choices) class Meta: ''' checks", "without a rating and transient ''' quote = fields.TextField() book = fields.ForeignKey( 'Edition',", "activity['to'] = mentions activity['cc'] = [] # \"pure\" serialization for non-bookwyrm instances if", "max_length=255, choices=NotificationType.choices) class Meta: ''' checks if notifcation is in enum list for", "= [followers] activity['cc'] = [public] + mentions elif self.privacy == 'followers': activity['to'] =", "null=True, related_name='related_user') related_status = models.ForeignKey( 'Status', on_delete=models.PROTECT, null=True) related_import = models.ForeignKey( 'ImportJob', on_delete=models.PROTECT,", "review but without a rating and transient ''' quote = fields.TextField() book =", "self.remote_id, **kwargs ) def to_activity(self, pure=False): ''' return tombstone if the status is", "InheritanceManager from 
bookwyrm import activitypub from .base_model import ActivitypubMixin, OrderedCollectionPageMixin from .base_model import" ]
[ "import LINE_LIMIT from .constants import TILESIZE from .constants import TETROMINO_GRID_SIZE from .constants import", "import TILESIZE from .constants import TETROMINO_GRID_SIZE from .constants import BORDER from .utils import", "Direction from .directions import direction_to_vector from .directions import vector_to_direction from .constants import ASSETS", "vector_to_direction from .constants import ASSETS from .constants import LINE_LIMIT from .constants import TILESIZE", "from .constants import TETROMINO_GRID_SIZE from .constants import BORDER from .utils import load_grid from", "import vector_to_direction from .constants import ASSETS from .constants import LINE_LIMIT from .constants import", "import BORDER from .utils import load_grid from .matrix_util import MatrixUtil from .gframe import", "import direction_to_vector from .directions import vector_to_direction from .constants import ASSETS from .constants import", ".constants import TILESIZE from .constants import TETROMINO_GRID_SIZE from .constants import BORDER from .utils", "<reponame>JacobChen258/AI-Constraints-Satisfaction from .directions import Direction from .directions import direction_to_vector from .directions import vector_to_direction", ".directions import direction_to_vector from .directions import vector_to_direction from .constants import ASSETS from .constants", "TETROMINO_GRID_SIZE from .constants import BORDER from .utils import load_grid from .matrix_util import MatrixUtil", "from .directions import vector_to_direction from .constants import ASSETS from .constants import LINE_LIMIT from", "from .directions import Direction from .directions import direction_to_vector from .directions import vector_to_direction from", "ASSETS from .constants import LINE_LIMIT from .constants import TILESIZE from .constants import TETROMINO_GRID_SIZE", ".constants import LINE_LIMIT from .constants import TILESIZE from .constants import TETROMINO_GRID_SIZE from .constants", "from .constants import 
LINE_LIMIT from .constants import TILESIZE from .constants import TETROMINO_GRID_SIZE from", "from .constants import BORDER from .utils import load_grid from .matrix_util import MatrixUtil from", "LINE_LIMIT from .constants import TILESIZE from .constants import TETROMINO_GRID_SIZE from .constants import BORDER", "from .constants import TILESIZE from .constants import TETROMINO_GRID_SIZE from .constants import BORDER from", ".constants import TETROMINO_GRID_SIZE from .constants import BORDER from .utils import load_grid from .matrix_util", "direction_to_vector from .directions import vector_to_direction from .constants import ASSETS from .constants import LINE_LIMIT", "TILESIZE from .constants import TETROMINO_GRID_SIZE from .constants import BORDER from .utils import load_grid", "BORDER from .utils import load_grid from .matrix_util import MatrixUtil from .gframe import GFrame", "from .constants import ASSETS from .constants import LINE_LIMIT from .constants import TILESIZE from", ".directions import vector_to_direction from .constants import ASSETS from .constants import LINE_LIMIT from .constants", "from .directions import direction_to_vector from .directions import vector_to_direction from .constants import ASSETS from", "import ASSETS from .constants import LINE_LIMIT from .constants import TILESIZE from .constants import", "import TETROMINO_GRID_SIZE from .constants import BORDER from .utils import load_grid from .matrix_util import", "import Direction from .directions import direction_to_vector from .directions import vector_to_direction from .constants import", ".directions import Direction from .directions import direction_to_vector from .directions import vector_to_direction from .constants", ".constants import BORDER from .utils import load_grid from .matrix_util import MatrixUtil from .gframe", ".constants import ASSETS from .constants import LINE_LIMIT from .constants import TILESIZE from .constants" ]
[ "gameObject class GameObject(): # constructor # param x: the window of the game", "= x self.y = y self.displayable = displayable # draw the gameObject #", "draw the gameObject # param window: the window of the game def draw(self,", "displayable # draw the gameObject # param window: the window of the game", "# implementing a gameObject class GameObject(): # constructor # param x: the window", "x, y, displayable): self.x = x self.y = y self.displayable = displayable #", "window: the window of the game def draw(self, window): pass # update the", "<filename>src/GameObject.py #!/usr/bin/python3 # implementing a gameObject class GameObject(): # constructor # param x:", "GameObject(): # constructor # param x: the window of the game def __init__(self,", "of the game def __init__(self, x, y, displayable): self.x = x self.y =", "#!/usr/bin/python3 # implementing a gameObject class GameObject(): # constructor # param x: the", "__init__(self, x, y, displayable): self.x = x self.y = y self.displayable = displayable", "self.displayable = displayable # draw the gameObject # param window: the window of", "gameObject # param window: the window of the game def draw(self, window): pass", "the window of the game def draw(self, window): pass # update the gameObject", "= y self.displayable = displayable # draw the gameObject # param window: the", "implementing a gameObject class GameObject(): # constructor # param x: the window of", "window of the game def __init__(self, x, y, displayable): self.x = x self.y", "# draw the gameObject # param window: the window of the game def", "x self.y = y self.displayable = displayable # draw the gameObject # param", "param window: the window of the game def draw(self, window): pass # update", "the window of the game def __init__(self, x, y, displayable): self.x = x", "window of the game def draw(self, window): pass # update the gameObject def", "game def __init__(self, x, y, displayable): self.x = x self.y = y self.displayable", "y self.displayable 
= displayable # draw the gameObject # param window: the window", "a gameObject class GameObject(): # constructor # param x: the window of the", "the gameObject # param window: the window of the game def draw(self, window):", "class GameObject(): # constructor # param x: the window of the game def", "def __init__(self, x, y, displayable): self.x = x self.y = y self.displayable =", "displayable): self.x = x self.y = y self.displayable = displayable # draw the", "y, displayable): self.x = x self.y = y self.displayable = displayable # draw", "self.y = y self.displayable = displayable # draw the gameObject # param window:", "x: the window of the game def __init__(self, x, y, displayable): self.x =", "self.x = x self.y = y self.displayable = displayable # draw the gameObject", "param x: the window of the game def __init__(self, x, y, displayable): self.x", "# param window: the window of the game def draw(self, window): pass #", "of the game def draw(self, window): pass # update the gameObject def update(self):", "the game def draw(self, window): pass # update the gameObject def update(self): pass", "constructor # param x: the window of the game def __init__(self, x, y,", "# constructor # param x: the window of the game def __init__(self, x,", "the game def __init__(self, x, y, displayable): self.x = x self.y = y", "= displayable # draw the gameObject # param window: the window of the", "# param x: the window of the game def __init__(self, x, y, displayable):" ]
[ "winreg import (HKEY_LOCAL_MACHINE, KEY_ALL_ACCESS, OpenKey, EnumValue, QueryValueEx) softFile = open('softLog.log', 'w') errorLog =", "+ subkey + '\\n') except: fp = StringIO() traceback.print_exc(file=fp) errorMessage = fp.getvalue() error", "QueryValueEx) softFile = open('softLog.log', 'w') errorLog = open('errors.log', 'w') r = wmi.Registry ()", "import traceback import wmi from winreg import (HKEY_LOCAL_MACHINE, KEY_ALL_ACCESS, OpenKey, EnumValue, QueryValueEx) softFile", "= keyPath + \"\\\\\" + subkey key = OpenKey(HKEY_LOCAL_MACHINE, path, 0, KEY_ALL_ACCESS) try:", "result, names = r.EnumKey (hDefKey=HKEY_LOCAL_MACHINE, sSubKeyName=r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\") softFile.write(r'These subkeys are found under \"HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\"\\n\\n') errorLog.write(\"Errors\\n\\n\")", "key = OpenKey(HKEY_LOCAL_MACHINE, path, 0, KEY_ALL_ACCESS) try: temp = QueryValueEx(key, 'DisplayName') display =", "import wmi from winreg import (HKEY_LOCAL_MACHINE, KEY_ALL_ACCESS, OpenKey, EnumValue, QueryValueEx) softFile = open('softLog.log',", "'w') errorLog = open('errors.log', 'w') r = wmi.Registry () result, names = r.EnumKey", "subkeys are found under \"HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\"\\n\\n') errorLog.write(\"Errors\\n\\n\") separator = \"*\" * 80 keyPath =", "errorLog.write(\"Errors\\n\\n\") separator = \"*\" * 80 keyPath = r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\" for subkey in names:", "subkey in names: try: softFile.write(separator + '\\n\\n') path = keyPath + \"\\\\\" +", "= \"*\" * 80 keyPath = r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\" for subkey in names: try: softFile.write(separator", "'\\n') except: softFile.write('Regkey: ' + subkey + '\\n') except: fp = StringIO() traceback.print_exc(file=fp)", "keyPath = r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\" for subkey in names: try: 
softFile.write(separator + '\\n\\n') path =", "= str(temp[0]) softFile.write('Display Name: ' + display + '\\nRegkey: ' + subkey +", "r.EnumKey (hDefKey=HKEY_LOCAL_MACHINE, sSubKeyName=r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\") softFile.write(r'These subkeys are found under \"HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\"\\n\\n') errorLog.write(\"Errors\\n\\n\") separator = \"*\"", "= fp.getvalue() error = 'Error for ' + key + '. Message follows:\\n'", "80 keyPath = r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\" for subkey in names: try: softFile.write(separator + '\\n\\n') path", "r = wmi.Registry () result, names = r.EnumKey (hDefKey=HKEY_LOCAL_MACHINE, sSubKeyName=r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\") softFile.write(r'These subkeys are", "\"HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\"\\n\\n') errorLog.write(\"Errors\\n\\n\") separator = \"*\" * 80 keyPath = r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\" for subkey in", "' + subkey + '\\n') except: fp = StringIO() traceback.print_exc(file=fp) errorMessage = fp.getvalue()", "subkey + '\\n') except: softFile.write('Regkey: ' + subkey + '\\n') except: fp =", "<filename>crossbaker/libs/softFinder.py from io import StringIO import traceback import wmi from winreg import (HKEY_LOCAL_MACHINE,", "error = 'Error for ' + key + '. 
Message follows:\\n' + errorMessage", "names = r.EnumKey (hDefKey=HKEY_LOCAL_MACHINE, sSubKeyName=r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\") softFile.write(r'These subkeys are found under \"HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\"\\n\\n') errorLog.write(\"Errors\\n\\n\") separator", "() result, names = r.EnumKey (hDefKey=HKEY_LOCAL_MACHINE, sSubKeyName=r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\") softFile.write(r'These subkeys are found under \"HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\"\\n\\n')", "sSubKeyName=r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\") softFile.write(r'These subkeys are found under \"HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\"\\n\\n') errorLog.write(\"Errors\\n\\n\") separator = \"*\" * 80", "softFile.write(separator + '\\n\\n') path = keyPath + \"\\\\\" + subkey key = OpenKey(HKEY_LOCAL_MACHINE,", "for ' + key + '. Message follows:\\n' + errorMessage errorLog.write(error) errorLog.write(\"\\n\\n\") softFile.close()", "' + key + '. 
Message follows:\\n' + errorMessage errorLog.write(error) errorLog.write(\"\\n\\n\") softFile.close() errorLog.close()", "+ \"\\\\\" + subkey key = OpenKey(HKEY_LOCAL_MACHINE, path, 0, KEY_ALL_ACCESS) try: temp =", "wmi.Registry () result, names = r.EnumKey (hDefKey=HKEY_LOCAL_MACHINE, sSubKeyName=r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\") softFile.write(r'These subkeys are found under", "r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\" for subkey in names: try: softFile.write(separator + '\\n\\n') path = keyPath +", "import StringIO import traceback import wmi from winreg import (HKEY_LOCAL_MACHINE, KEY_ALL_ACCESS, OpenKey, EnumValue,", "+ '\\n\\n') path = keyPath + \"\\\\\" + subkey key = OpenKey(HKEY_LOCAL_MACHINE, path,", "'w') r = wmi.Registry () result, names = r.EnumKey (hDefKey=HKEY_LOCAL_MACHINE, sSubKeyName=r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\") softFile.write(r'These subkeys", "from winreg import (HKEY_LOCAL_MACHINE, KEY_ALL_ACCESS, OpenKey, EnumValue, QueryValueEx) softFile = open('softLog.log', 'w') errorLog", "= wmi.Registry () result, names = r.EnumKey (hDefKey=HKEY_LOCAL_MACHINE, sSubKeyName=r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\") softFile.write(r'These subkeys are found", "= open('softLog.log', 'w') errorLog = open('errors.log', 'w') r = wmi.Registry () result, names", "= r.EnumKey (hDefKey=HKEY_LOCAL_MACHINE, sSubKeyName=r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\") softFile.write(r'These subkeys are found under \"HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\"\\n\\n') errorLog.write(\"Errors\\n\\n\") separator =", "\"\\\\\" + subkey key = OpenKey(HKEY_LOCAL_MACHINE, path, 0, KEY_ALL_ACCESS) try: temp = QueryValueEx(key,", "try: softFile.write(separator + '\\n\\n') path = keyPath + \"\\\\\" + subkey key =", "+ '\\n') except: fp = StringIO() traceback.print_exc(file=fp) errorMessage = fp.getvalue() error = 'Error", "* 80 keyPath 
= r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\" for subkey in names: try: softFile.write(separator + '\\n\\n')", "names: try: softFile.write(separator + '\\n\\n') path = keyPath + \"\\\\\" + subkey key", "wmi from winreg import (HKEY_LOCAL_MACHINE, KEY_ALL_ACCESS, OpenKey, EnumValue, QueryValueEx) softFile = open('softLog.log', 'w')", "separator = \"*\" * 80 keyPath = r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\" for subkey in names: try:", "display = str(temp[0]) softFile.write('Display Name: ' + display + '\\nRegkey: ' + subkey", "'\\n\\n') path = keyPath + \"\\\\\" + subkey key = OpenKey(HKEY_LOCAL_MACHINE, path, 0,", "= r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\" for subkey in names: try: softFile.write(separator + '\\n\\n') path = keyPath", "fp = StringIO() traceback.print_exc(file=fp) errorMessage = fp.getvalue() error = 'Error for ' +", "str(temp[0]) softFile.write('Display Name: ' + display + '\\nRegkey: ' + subkey + '\\n')", "QueryValueEx(key, 'DisplayName') display = str(temp[0]) softFile.write('Display Name: ' + display + '\\nRegkey: '", "for subkey in names: try: softFile.write(separator + '\\n\\n') path = keyPath + \"\\\\\"", "Name: ' + display + '\\nRegkey: ' + subkey + '\\n') except: softFile.write('Regkey:", "softFile.write('Display Name: ' + display + '\\nRegkey: ' + subkey + '\\n') except:", "+ display + '\\nRegkey: ' + subkey + '\\n') except: softFile.write('Regkey: ' +", "try: temp = QueryValueEx(key, 'DisplayName') display = str(temp[0]) softFile.write('Display Name: ' + display", "path, 0, KEY_ALL_ACCESS) try: temp = QueryValueEx(key, 'DisplayName') display = str(temp[0]) softFile.write('Display Name:", "found under \"HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\"\\n\\n') errorLog.write(\"Errors\\n\\n\") separator = \"*\" * 80 keyPath = r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\" for", "(HKEY_LOCAL_MACHINE, KEY_ALL_ACCESS, OpenKey, EnumValue, 
QueryValueEx) softFile = open('softLog.log', 'w') errorLog = open('errors.log', 'w')", "softFile.write('Regkey: ' + subkey + '\\n') except: fp = StringIO() traceback.print_exc(file=fp) errorMessage =", "traceback import wmi from winreg import (HKEY_LOCAL_MACHINE, KEY_ALL_ACCESS, OpenKey, EnumValue, QueryValueEx) softFile =", "OpenKey(HKEY_LOCAL_MACHINE, path, 0, KEY_ALL_ACCESS) try: temp = QueryValueEx(key, 'DisplayName') display = str(temp[0]) softFile.write('Display", "softFile = open('softLog.log', 'w') errorLog = open('errors.log', 'w') r = wmi.Registry () result,", "from io import StringIO import traceback import wmi from winreg import (HKEY_LOCAL_MACHINE, KEY_ALL_ACCESS,", "are found under \"HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\"\\n\\n') errorLog.write(\"Errors\\n\\n\") separator = \"*\" * 80 keyPath = r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\"", "errorLog = open('errors.log', 'w') r = wmi.Registry () result, names = r.EnumKey (hDefKey=HKEY_LOCAL_MACHINE,", "open('softLog.log', 'w') errorLog = open('errors.log', 'w') r = wmi.Registry () result, names =", "subkey + '\\n') except: fp = StringIO() traceback.print_exc(file=fp) errorMessage = fp.getvalue() error =", "StringIO import traceback import wmi from winreg import (HKEY_LOCAL_MACHINE, KEY_ALL_ACCESS, OpenKey, EnumValue, QueryValueEx)", "open('errors.log', 'w') r = wmi.Registry () result, names = r.EnumKey (hDefKey=HKEY_LOCAL_MACHINE, sSubKeyName=r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\") softFile.write(r'These", "+ '\\n') except: softFile.write('Regkey: ' + subkey + '\\n') except: fp = StringIO()", "'\\n') except: fp = StringIO() traceback.print_exc(file=fp) errorMessage = fp.getvalue() error = 'Error for", "KEY_ALL_ACCESS) try: temp = QueryValueEx(key, 'DisplayName') display = str(temp[0]) softFile.write('Display Name: ' +", "EnumValue, QueryValueEx) softFile = open('softLog.log', 'w') errorLog = open('errors.log', 'w') r = 
wmi.Registry", "import (HKEY_LOCAL_MACHINE, KEY_ALL_ACCESS, OpenKey, EnumValue, QueryValueEx) softFile = open('softLog.log', 'w') errorLog = open('errors.log',", "= QueryValueEx(key, 'DisplayName') display = str(temp[0]) softFile.write('Display Name: ' + display + '\\nRegkey:", "OpenKey, EnumValue, QueryValueEx) softFile = open('softLog.log', 'w') errorLog = open('errors.log', 'w') r =", "'DisplayName') display = str(temp[0]) softFile.write('Display Name: ' + display + '\\nRegkey: ' +", "+ subkey key = OpenKey(HKEY_LOCAL_MACHINE, path, 0, KEY_ALL_ACCESS) try: temp = QueryValueEx(key, 'DisplayName')", "under \"HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\"\\n\\n') errorLog.write(\"Errors\\n\\n\") separator = \"*\" * 80 keyPath = r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\" for subkey", "= 'Error for ' + key + '. Message follows:\\n' + errorMessage errorLog.write(error)", "= open('errors.log', 'w') r = wmi.Registry () result, names = r.EnumKey (hDefKey=HKEY_LOCAL_MACHINE, sSubKeyName=r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\")", "' + subkey + '\\n') except: softFile.write('Regkey: ' + subkey + '\\n') except:", "KEY_ALL_ACCESS, OpenKey, EnumValue, QueryValueEx) softFile = open('softLog.log', 'w') errorLog = open('errors.log', 'w') r", "traceback.print_exc(file=fp) errorMessage = fp.getvalue() error = 'Error for ' + key + '.", "'\\nRegkey: ' + subkey + '\\n') except: softFile.write('Regkey: ' + subkey + '\\n')", "io import StringIO import traceback import wmi from winreg import (HKEY_LOCAL_MACHINE, KEY_ALL_ACCESS, OpenKey,", "' + display + '\\nRegkey: ' + subkey + '\\n') except: softFile.write('Regkey: '", "subkey key = OpenKey(HKEY_LOCAL_MACHINE, path, 0, KEY_ALL_ACCESS) try: temp = QueryValueEx(key, 'DisplayName') display", "+ subkey + '\\n') except: softFile.write('Regkey: ' + subkey + '\\n') except: fp", "display + '\\nRegkey: ' + subkey + '\\n') except: softFile.write('Regkey: ' + subkey", 
"fp.getvalue() error = 'Error for ' + key + '. Message follows:\\n' +", "except: fp = StringIO() traceback.print_exc(file=fp) errorMessage = fp.getvalue() error = 'Error for '", "0, KEY_ALL_ACCESS) try: temp = QueryValueEx(key, 'DisplayName') display = str(temp[0]) softFile.write('Display Name: '", "+ '\\nRegkey: ' + subkey + '\\n') except: softFile.write('Regkey: ' + subkey +", "(hDefKey=HKEY_LOCAL_MACHINE, sSubKeyName=r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\") softFile.write(r'These subkeys are found under \"HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\"\\n\\n') errorLog.write(\"Errors\\n\\n\") separator = \"*\" *", "\"*\" * 80 keyPath = r\"Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\" for subkey in names: try: softFile.write(separator +", "'Error for ' + key + '. Message follows:\\n' + errorMessage errorLog.write(error) errorLog.write(\"\\n\\n\")", "softFile.write(r'These subkeys are found under \"HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\"\\n\\n') errorLog.write(\"Errors\\n\\n\") separator = \"*\" * 80 keyPath", "temp = QueryValueEx(key, 'DisplayName') display = str(temp[0]) softFile.write('Display Name: ' + display +", "in names: try: softFile.write(separator + '\\n\\n') path = keyPath + \"\\\\\" + subkey", "StringIO() traceback.print_exc(file=fp) errorMessage = fp.getvalue() error = 'Error for ' + key +", "= StringIO() traceback.print_exc(file=fp) errorMessage = fp.getvalue() error = 'Error for ' + key", "keyPath + \"\\\\\" + subkey key = OpenKey(HKEY_LOCAL_MACHINE, path, 0, KEY_ALL_ACCESS) try: temp", "path = keyPath + \"\\\\\" + subkey key = OpenKey(HKEY_LOCAL_MACHINE, path, 0, KEY_ALL_ACCESS)", "except: softFile.write('Regkey: ' + subkey + '\\n') except: fp = StringIO() traceback.print_exc(file=fp) errorMessage", "errorMessage = fp.getvalue() error = 'Error for ' + key + '. 
Message", "= OpenKey(HKEY_LOCAL_MACHINE, path, 0, KEY_ALL_ACCESS) try: temp = QueryValueEx(key, 'DisplayName') display = str(temp[0])" ]
[ "message = { \"id\": \"test:1\" } self.stream_consumer.message = message self.stream_consumer.routing_key = \"harvest.stop.test.test_usertimeline\" self.stream_consumer.on_message()", "shutil from sfmutils.consumer import MqConfig from sfmutils.stream_consumer import StreamConsumer from sfmutils.supervisor import HarvestSupervisor", "self.mock_supervisor = MagicMock(spec=HarvestSupervisor) mock_supervisor_class.side_effect = [self.mock_supervisor] self.working_path = tempfile.mkdtemp() self.stream_consumer = StreamConsumer(\"/opt/sfm/test.py\", self.working_path,", "TestCase from mock import MagicMock, patch import socket import tempfile import os import", "import HarvestSupervisor class TestStreamConsumer(TestCase): def setUp(self): self.patcher = patch(\"sfmutils.stream_consumer.HarvestSupervisor\") mock_supervisor_class = self.patcher.start() self.mock_supervisor", "= tempfile.mkdtemp() self.stream_consumer = StreamConsumer(\"/opt/sfm/test.py\", self.working_path, mq_config=MqConfig(None, None, None, None, {\"test_queue\": [ \"harvest.start.test.test_usertimeline\",", "= [self.mock_supervisor] self.working_path = tempfile.mkdtemp() self.stream_consumer = StreamConsumer(\"/opt/sfm/test.py\", self.working_path, mq_config=MqConfig(None, None, None, None,", "import shutil from sfmutils.consumer import MqConfig from sfmutils.stream_consumer import StreamConsumer from sfmutils.supervisor import", "def setUp(self): self.patcher = patch(\"sfmutils.stream_consumer.HarvestSupervisor\") mock_supervisor_class = self.patcher.start() self.mock_supervisor = MagicMock(spec=HarvestSupervisor) mock_supervisor_class.side_effect =", "def test_remove(self): message = { \"id\": \"test:1\" } self.stream_consumer.message = message self.stream_consumer.routing_key =", "\"test_queue_{}\".format(socket.gethostname()) self.assertSetEqual({\"test_queue\", stop_queue}, set(self.stream_consumer.mq_config.queues.keys())) self.assertListEqual([\"harvest.stop.test.test_usertimeline\", 
\"harvest.stop.test.test_search\"], self.stream_consumer.mq_config.queues[stop_queue]) def test_start(self): message = { \"id\":", "MagicMock(spec=HarvestSupervisor) mock_supervisor_class.side_effect = [self.mock_supervisor] self.working_path = tempfile.mkdtemp() self.stream_consumer = StreamConsumer(\"/opt/sfm/test.py\", self.working_path, mq_config=MqConfig(None, None,", "= \"test_queue_{}\".format(socket.gethostname()) self.assertSetEqual({\"test_queue\", stop_queue}, set(self.stream_consumer.mq_config.queues.keys())) self.assertListEqual([\"harvest.stop.test.test_usertimeline\", \"harvest.stop.test.test_search\"], self.stream_consumer.mq_config.queues[stop_queue]) def test_start(self): message = {", "[self.mock_supervisor] self.working_path = tempfile.mkdtemp() self.stream_consumer = StreamConsumer(\"/opt/sfm/test.py\", self.working_path, mq_config=MqConfig(None, None, None, None, {\"test_queue\":", "class TestStreamConsumer(TestCase): def setUp(self): self.patcher = patch(\"sfmutils.stream_consumer.HarvestSupervisor\") mock_supervisor_class = self.patcher.start() self.mock_supervisor = MagicMock(spec=HarvestSupervisor)", "None, None, None, {\"test_queue\": [ \"harvest.start.test.test_usertimeline\", \"harvest.start.test.test_search\"]}), ) def tearDown(self): # self.patcher.remove() if", "patch import socket import tempfile import os import shutil from sfmutils.consumer import MqConfig", "stop_queue = \"test_queue_{}\".format(socket.gethostname()) self.assertSetEqual({\"test_queue\", stop_queue}, set(self.stream_consumer.mq_config.queues.keys())) self.assertListEqual([\"harvest.stop.test.test_usertimeline\", \"harvest.stop.test.test_search\"], self.stream_consumer.mq_config.queues[stop_queue]) def test_start(self): message =", "StreamConsumer from sfmutils.supervisor import HarvestSupervisor class TestStreamConsumer(TestCase): def setUp(self): self.patcher = patch(\"sfmutils.stream_consumer.HarvestSupervisor\") mock_supervisor_class", "unittest import TestCase 
from mock import MagicMock, patch import socket import tempfile import", "= { \"id\": \"test:1\" } self.stream_consumer.message = message self.stream_consumer.routing_key = \"harvest.stop.test.test_usertimeline\" self.stream_consumer.on_message() self.mock_supervisor.remove.called_once_with(\"test:1\")", "self.stream_consumer.on_message() self.mock_supervisor.start.called_once_with(message, \"harvest.start.test.test_usertimeline\") def test_remove(self): message = { \"id\": \"test:1\" } self.stream_consumer.message =", "\"test:1\", \"collection_set\": { \"id\": \"test_collection_set\" } } self.stream_consumer.message = message self.stream_consumer.routing_key = \"harvest.start.test.test_usertimeline\"", "import MqConfig from sfmutils.stream_consumer import StreamConsumer from sfmutils.supervisor import HarvestSupervisor class TestStreamConsumer(TestCase): def", "mock_supervisor_class.side_effect = [self.mock_supervisor] self.working_path = tempfile.mkdtemp() self.stream_consumer = StreamConsumer(\"/opt/sfm/test.py\", self.working_path, mq_config=MqConfig(None, None, None,", "HarvestSupervisor class TestStreamConsumer(TestCase): def setUp(self): self.patcher = patch(\"sfmutils.stream_consumer.HarvestSupervisor\") mock_supervisor_class = self.patcher.start() self.mock_supervisor =", "= { \"id\": \"test:1\", \"collection_set\": { \"id\": \"test_collection_set\" } } self.stream_consumer.message = message", "message self.stream_consumer.routing_key = \"harvest.start.test.test_usertimeline\" self.stream_consumer.on_message() self.mock_supervisor.start.called_once_with(message, \"harvest.start.test.test_usertimeline\") def test_remove(self): message = { \"id\":", "= patch(\"sfmutils.stream_consumer.HarvestSupervisor\") mock_supervisor_class = self.patcher.start() self.mock_supervisor = MagicMock(spec=HarvestSupervisor) mock_supervisor_class.side_effect = [self.mock_supervisor] self.working_path =", "self.patcher.start() self.mock_supervisor = 
MagicMock(spec=HarvestSupervisor) mock_supervisor_class.side_effect = [self.mock_supervisor] self.working_path = tempfile.mkdtemp() self.stream_consumer = StreamConsumer(\"/opt/sfm/test.py\",", "self.working_path = tempfile.mkdtemp() self.stream_consumer = StreamConsumer(\"/opt/sfm/test.py\", self.working_path, mq_config=MqConfig(None, None, None, None, {\"test_queue\": [", "StreamConsumer(\"/opt/sfm/test.py\", self.working_path, mq_config=MqConfig(None, None, None, None, {\"test_queue\": [ \"harvest.start.test.test_usertimeline\", \"harvest.start.test.test_search\"]}), ) def tearDown(self):", "def test_start(self): message = { \"id\": \"test:1\", \"collection_set\": { \"id\": \"test_collection_set\" } }", "from sfmutils.supervisor import HarvestSupervisor class TestStreamConsumer(TestCase): def setUp(self): self.patcher = patch(\"sfmutils.stream_consumer.HarvestSupervisor\") mock_supervisor_class =", "[ \"harvest.start.test.test_usertimeline\", \"harvest.start.test.test_search\"]}), ) def tearDown(self): # self.patcher.remove() if os.path.exists(self.working_path): shutil.rmtree(self.working_path) def test_stop_queue(self):", "= MagicMock(spec=HarvestSupervisor) mock_supervisor_class.side_effect = [self.mock_supervisor] self.working_path = tempfile.mkdtemp() self.stream_consumer = StreamConsumer(\"/opt/sfm/test.py\", self.working_path, mq_config=MqConfig(None,", "import TestCase from mock import MagicMock, patch import socket import tempfile import os", "= \"harvest.start.test.test_usertimeline\" self.stream_consumer.on_message() self.mock_supervisor.start.called_once_with(message, \"harvest.start.test.test_usertimeline\") def test_remove(self): message = { \"id\": \"test:1\" }", "mock_supervisor_class = self.patcher.start() self.mock_supervisor = MagicMock(spec=HarvestSupervisor) mock_supervisor_class.side_effect = [self.mock_supervisor] self.working_path = tempfile.mkdtemp() self.stream_consumer", "TestStreamConsumer(TestCase): def setUp(self): self.patcher = 
patch(\"sfmutils.stream_consumer.HarvestSupervisor\") mock_supervisor_class = self.patcher.start() self.mock_supervisor = MagicMock(spec=HarvestSupervisor) mock_supervisor_class.side_effect", "tempfile.mkdtemp() self.stream_consumer = StreamConsumer(\"/opt/sfm/test.py\", self.working_path, mq_config=MqConfig(None, None, None, None, {\"test_queue\": [ \"harvest.start.test.test_usertimeline\", \"harvest.start.test.test_search\"]}),", "from mock import MagicMock, patch import socket import tempfile import os import shutil", "self.stream_consumer.message = message self.stream_consumer.routing_key = \"harvest.start.test.test_usertimeline\" self.stream_consumer.on_message() self.mock_supervisor.start.called_once_with(message, \"harvest.start.test.test_usertimeline\") def test_remove(self): message =", "set(self.stream_consumer.mq_config.queues.keys())) self.assertListEqual([\"harvest.stop.test.test_usertimeline\", \"harvest.stop.test.test_search\"], self.stream_consumer.mq_config.queues[stop_queue]) def test_start(self): message = { \"id\": \"test:1\", \"collection_set\": {", "absolute_import from unittest import TestCase from mock import MagicMock, patch import socket import", "shutil.rmtree(self.working_path) def test_stop_queue(self): stop_queue = \"test_queue_{}\".format(socket.gethostname()) self.assertSetEqual({\"test_queue\", stop_queue}, set(self.stream_consumer.mq_config.queues.keys())) self.assertListEqual([\"harvest.stop.test.test_usertimeline\", \"harvest.stop.test.test_search\"], self.stream_consumer.mq_config.queues[stop_queue]) def", "= self.patcher.start() self.mock_supervisor = MagicMock(spec=HarvestSupervisor) mock_supervisor_class.side_effect = [self.mock_supervisor] self.working_path = tempfile.mkdtemp() self.stream_consumer =", "from unittest import TestCase from mock import MagicMock, patch import socket import tempfile", "self.mock_supervisor.start.called_once_with(message, \"harvest.start.test.test_usertimeline\") def test_remove(self): message = 
{ \"id\": \"test:1\" } self.stream_consumer.message = message", "self.patcher = patch(\"sfmutils.stream_consumer.HarvestSupervisor\") mock_supervisor_class = self.patcher.start() self.mock_supervisor = MagicMock(spec=HarvestSupervisor) mock_supervisor_class.side_effect = [self.mock_supervisor] self.working_path", "\"id\": \"test_collection_set\" } } self.stream_consumer.message = message self.stream_consumer.routing_key = \"harvest.start.test.test_usertimeline\" self.stream_consumer.on_message() self.mock_supervisor.start.called_once_with(message, \"harvest.start.test.test_usertimeline\")", "\"harvest.start.test.test_usertimeline\") def test_remove(self): message = { \"id\": \"test:1\" } self.stream_consumer.message = message self.stream_consumer.routing_key", "\"harvest.start.test.test_search\"]}), ) def tearDown(self): # self.patcher.remove() if os.path.exists(self.working_path): shutil.rmtree(self.working_path) def test_stop_queue(self): stop_queue =", "MagicMock, patch import socket import tempfile import os import shutil from sfmutils.consumer import", "sfmutils.consumer import MqConfig from sfmutils.stream_consumer import StreamConsumer from sfmutils.supervisor import HarvestSupervisor class TestStreamConsumer(TestCase):", "MqConfig from sfmutils.stream_consumer import StreamConsumer from sfmutils.supervisor import HarvestSupervisor class TestStreamConsumer(TestCase): def setUp(self):", "def test_stop_queue(self): stop_queue = \"test_queue_{}\".format(socket.gethostname()) self.assertSetEqual({\"test_queue\", stop_queue}, set(self.stream_consumer.mq_config.queues.keys())) self.assertListEqual([\"harvest.stop.test.test_usertimeline\", \"harvest.stop.test.test_search\"], self.stream_consumer.mq_config.queues[stop_queue]) def test_start(self):", "tearDown(self): # self.patcher.remove() if os.path.exists(self.working_path): shutil.rmtree(self.working_path) def test_stop_queue(self): stop_queue = \"test_queue_{}\".format(socket.gethostname()) 
self.assertSetEqual({\"test_queue\", stop_queue},", "} self.stream_consumer.message = message self.stream_consumer.routing_key = \"harvest.start.test.test_usertimeline\" self.stream_consumer.on_message() self.mock_supervisor.start.called_once_with(message, \"harvest.start.test.test_usertimeline\") def test_remove(self): message", "socket import tempfile import os import shutil from sfmutils.consumer import MqConfig from sfmutils.stream_consumer", "\"id\": \"test:1\", \"collection_set\": { \"id\": \"test_collection_set\" } } self.stream_consumer.message = message self.stream_consumer.routing_key =", "# self.patcher.remove() if os.path.exists(self.working_path): shutil.rmtree(self.working_path) def test_stop_queue(self): stop_queue = \"test_queue_{}\".format(socket.gethostname()) self.assertSetEqual({\"test_queue\", stop_queue}, set(self.stream_consumer.mq_config.queues.keys()))", "os import shutil from sfmutils.consumer import MqConfig from sfmutils.stream_consumer import StreamConsumer from sfmutils.supervisor", "} } self.stream_consumer.message = message self.stream_consumer.routing_key = \"harvest.start.test.test_usertimeline\" self.stream_consumer.on_message() self.mock_supervisor.start.called_once_with(message, \"harvest.start.test.test_usertimeline\") def test_remove(self):", "import MagicMock, patch import socket import tempfile import os import shutil from sfmutils.consumer", "self.assertSetEqual({\"test_queue\", stop_queue}, set(self.stream_consumer.mq_config.queues.keys())) self.assertListEqual([\"harvest.stop.test.test_usertimeline\", \"harvest.stop.test.test_search\"], self.stream_consumer.mq_config.queues[stop_queue]) def test_start(self): message = { \"id\": \"test:1\",", "from sfmutils.consumer import MqConfig from sfmutils.stream_consumer import StreamConsumer from sfmutils.supervisor import HarvestSupervisor class", "from __future__ import absolute_import from unittest import TestCase from mock import MagicMock, patch", "{ \"id\": \"test:1\", 
\"collection_set\": { \"id\": \"test_collection_set\" } } self.stream_consumer.message = message self.stream_consumer.routing_key", "import absolute_import from unittest import TestCase from mock import MagicMock, patch import socket", "{ \"id\": \"test_collection_set\" } } self.stream_consumer.message = message self.stream_consumer.routing_key = \"harvest.start.test.test_usertimeline\" self.stream_consumer.on_message() self.mock_supervisor.start.called_once_with(message,", "test_start(self): message = { \"id\": \"test:1\", \"collection_set\": { \"id\": \"test_collection_set\" } } self.stream_consumer.message", "{\"test_queue\": [ \"harvest.start.test.test_usertimeline\", \"harvest.start.test.test_search\"]}), ) def tearDown(self): # self.patcher.remove() if os.path.exists(self.working_path): shutil.rmtree(self.working_path) def", "\"collection_set\": { \"id\": \"test_collection_set\" } } self.stream_consumer.message = message self.stream_consumer.routing_key = \"harvest.start.test.test_usertimeline\" self.stream_consumer.on_message()", "\"harvest.stop.test.test_search\"], self.stream_consumer.mq_config.queues[stop_queue]) def test_start(self): message = { \"id\": \"test:1\", \"collection_set\": { \"id\": \"test_collection_set\"", "self.working_path, mq_config=MqConfig(None, None, None, None, {\"test_queue\": [ \"harvest.start.test.test_usertimeline\", \"harvest.start.test.test_search\"]}), ) def tearDown(self): #", "None, {\"test_queue\": [ \"harvest.start.test.test_usertimeline\", \"harvest.start.test.test_search\"]}), ) def tearDown(self): # self.patcher.remove() if os.path.exists(self.working_path): shutil.rmtree(self.working_path)", ") def tearDown(self): # self.patcher.remove() if os.path.exists(self.working_path): shutil.rmtree(self.working_path) def test_stop_queue(self): stop_queue = \"test_queue_{}\".format(socket.gethostname())", "setUp(self): self.patcher = patch(\"sfmutils.stream_consumer.HarvestSupervisor\") mock_supervisor_class = 
self.patcher.start() self.mock_supervisor = MagicMock(spec=HarvestSupervisor) mock_supervisor_class.side_effect = [self.mock_supervisor]", "None, None, {\"test_queue\": [ \"harvest.start.test.test_usertimeline\", \"harvest.start.test.test_search\"]}), ) def tearDown(self): # self.patcher.remove() if os.path.exists(self.working_path):", "self.stream_consumer = StreamConsumer(\"/opt/sfm/test.py\", self.working_path, mq_config=MqConfig(None, None, None, None, {\"test_queue\": [ \"harvest.start.test.test_usertimeline\", \"harvest.start.test.test_search\"]}), )", "= StreamConsumer(\"/opt/sfm/test.py\", self.working_path, mq_config=MqConfig(None, None, None, None, {\"test_queue\": [ \"harvest.start.test.test_usertimeline\", \"harvest.start.test.test_search\"]}), ) def", "test_remove(self): message = { \"id\": \"test:1\" } self.stream_consumer.message = message self.stream_consumer.routing_key = \"harvest.stop.test.test_usertimeline\"", "__future__ import absolute_import from unittest import TestCase from mock import MagicMock, patch import", "sfmutils.supervisor import HarvestSupervisor class TestStreamConsumer(TestCase): def setUp(self): self.patcher = patch(\"sfmutils.stream_consumer.HarvestSupervisor\") mock_supervisor_class = self.patcher.start()", "patch(\"sfmutils.stream_consumer.HarvestSupervisor\") mock_supervisor_class = self.patcher.start() self.mock_supervisor = MagicMock(spec=HarvestSupervisor) mock_supervisor_class.side_effect = [self.mock_supervisor] self.working_path = tempfile.mkdtemp()", "import os import shutil from sfmutils.consumer import MqConfig from sfmutils.stream_consumer import StreamConsumer from", "test_stop_queue(self): stop_queue = \"test_queue_{}\".format(socket.gethostname()) self.assertSetEqual({\"test_queue\", stop_queue}, set(self.stream_consumer.mq_config.queues.keys())) self.assertListEqual([\"harvest.stop.test.test_usertimeline\", \"harvest.stop.test.test_search\"], self.stream_consumer.mq_config.queues[stop_queue]) def 
test_start(self): message", "message = { \"id\": \"test:1\", \"collection_set\": { \"id\": \"test_collection_set\" } } self.stream_consumer.message =", "mq_config=MqConfig(None, None, None, None, {\"test_queue\": [ \"harvest.start.test.test_usertimeline\", \"harvest.start.test.test_search\"]}), ) def tearDown(self): # self.patcher.remove()", "self.patcher.remove() if os.path.exists(self.working_path): shutil.rmtree(self.working_path) def test_stop_queue(self): stop_queue = \"test_queue_{}\".format(socket.gethostname()) self.assertSetEqual({\"test_queue\", stop_queue}, set(self.stream_consumer.mq_config.queues.keys())) self.assertListEqual([\"harvest.stop.test.test_usertimeline\",", "\"harvest.start.test.test_usertimeline\", \"harvest.start.test.test_search\"]}), ) def tearDown(self): # self.patcher.remove() if os.path.exists(self.working_path): shutil.rmtree(self.working_path) def test_stop_queue(self): stop_queue", "import tempfile import os import shutil from sfmutils.consumer import MqConfig from sfmutils.stream_consumer import", "if os.path.exists(self.working_path): shutil.rmtree(self.working_path) def test_stop_queue(self): stop_queue = \"test_queue_{}\".format(socket.gethostname()) self.assertSetEqual({\"test_queue\", stop_queue}, set(self.stream_consumer.mq_config.queues.keys())) self.assertListEqual([\"harvest.stop.test.test_usertimeline\", \"harvest.stop.test.test_search\"],", "os.path.exists(self.working_path): shutil.rmtree(self.working_path) def test_stop_queue(self): stop_queue = \"test_queue_{}\".format(socket.gethostname()) self.assertSetEqual({\"test_queue\", stop_queue}, set(self.stream_consumer.mq_config.queues.keys())) self.assertListEqual([\"harvest.stop.test.test_usertimeline\", \"harvest.stop.test.test_search\"], self.stream_consumer.mq_config.queues[stop_queue])", "import socket import tempfile import os import shutil from sfmutils.consumer import MqConfig from", "stop_queue}, set(self.stream_consumer.mq_config.queues.keys())) 
self.assertListEqual([\"harvest.stop.test.test_usertimeline\", \"harvest.stop.test.test_search\"], self.stream_consumer.mq_config.queues[stop_queue]) def test_start(self): message = { \"id\": \"test:1\", \"collection_set\":", "def tearDown(self): # self.patcher.remove() if os.path.exists(self.working_path): shutil.rmtree(self.working_path) def test_stop_queue(self): stop_queue = \"test_queue_{}\".format(socket.gethostname()) self.assertSetEqual({\"test_queue\",", "self.stream_consumer.routing_key = \"harvest.start.test.test_usertimeline\" self.stream_consumer.on_message() self.mock_supervisor.start.called_once_with(message, \"harvest.start.test.test_usertimeline\") def test_remove(self): message = { \"id\": \"test:1\"", "from sfmutils.stream_consumer import StreamConsumer from sfmutils.supervisor import HarvestSupervisor class TestStreamConsumer(TestCase): def setUp(self): self.patcher", "= message self.stream_consumer.routing_key = \"harvest.start.test.test_usertimeline\" self.stream_consumer.on_message() self.mock_supervisor.start.called_once_with(message, \"harvest.start.test.test_usertimeline\") def test_remove(self): message = {", "sfmutils.stream_consumer import StreamConsumer from sfmutils.supervisor import HarvestSupervisor class TestStreamConsumer(TestCase): def setUp(self): self.patcher =", "import StreamConsumer from sfmutils.supervisor import HarvestSupervisor class TestStreamConsumer(TestCase): def setUp(self): self.patcher = patch(\"sfmutils.stream_consumer.HarvestSupervisor\")", "self.stream_consumer.mq_config.queues[stop_queue]) def test_start(self): message = { \"id\": \"test:1\", \"collection_set\": { \"id\": \"test_collection_set\" }", "self.assertListEqual([\"harvest.stop.test.test_usertimeline\", \"harvest.stop.test.test_search\"], self.stream_consumer.mq_config.queues[stop_queue]) def test_start(self): message = { \"id\": \"test:1\", \"collection_set\": { \"id\":", "mock import MagicMock, patch import socket import tempfile import os import 
shutil from", "tempfile import os import shutil from sfmutils.consumer import MqConfig from sfmutils.stream_consumer import StreamConsumer", "\"harvest.start.test.test_usertimeline\" self.stream_consumer.on_message() self.mock_supervisor.start.called_once_with(message, \"harvest.start.test.test_usertimeline\") def test_remove(self): message = { \"id\": \"test:1\" } self.stream_consumer.message", "\"test_collection_set\" } } self.stream_consumer.message = message self.stream_consumer.routing_key = \"harvest.start.test.test_usertimeline\" self.stream_consumer.on_message() self.mock_supervisor.start.called_once_with(message, \"harvest.start.test.test_usertimeline\") def" ]
[ "new_map[(x,y)] = '>' new_east.append((x,y)) east = new_east new_south = [] for sc in", "in enumerate(lines): for x, sc in enumerate(line): if sc == '>': east.append((x,y)) elif", "for sc in south: x, y = sc ny = y + 1", "sc ny = y + 1 if y + 1 < rows else", "_map = {} east = [] south = [] for y, line in", "in count(1): east_move = False south_move = False new_map = {} new_east =", "new_south.append((x, ny)) south_move = True else: new_map[(x,y)] = 'v' new_south.append((x,y)) south = new_south", "new_south.append((x,y)) south = new_south _map = new_map if not east_move and not south_move:", "{} east = [] south = [] for y, line in enumerate(lines): for", "new_map = {} new_east = [] for sc in east: x, y =", "'.') == '.' and _map.get((x,ny), '.') != 'v': new_map[(x,ny)] = 'v' new_south.append((x, ny))", "import count from aocd import lines rows = len(lines) cols = len(lines[0]) _map", "new_south _map = new_map if not east_move and not south_move: break print('Part 1:',", "= len(lines[0]) _map = {} east = [] south = [] for y,", "east = new_east new_south = [] for sc in south: x, y =", "east.append((x,y)) elif sc == 'v': south.append((x,y)) _map[(x,y)] = sc for step in count(1):", "rows = len(lines) cols = len(lines[0]) _map = {} east = [] south", "in east: x, y = sc nx = x + 1 if x", "== '.': new_map[(nx,y)] = '>' new_east.append((nx, y)) east_move = True else: new_map[(x,y)] =", "new_east.append((nx, y)) east_move = True else: new_map[(x,y)] = '>' new_east.append((x,y)) east = new_east", "== 'v': south.append((x,y)) _map[(x,y)] = sc for step in count(1): east_move = False", "nx = x + 1 if x + 1 < cols else 0", "else 0 if _map.get((nx,y), '.') == '.': new_map[(nx,y)] = '>' new_east.append((nx, y)) east_move", "[] south = [] for y, line in enumerate(lines): for x, sc in", "x, y = sc nx = x + 1 if x + 1", "'.') == '.': new_map[(nx,y)] = '>' new_east.append((nx, y)) east_move = True else: new_map[(x,y)]", "+ 1 < cols else 0 if _map.get((nx,y), '.') == '.': 
new_map[(nx,y)] =", "= {} east = [] south = [] for y, line in enumerate(lines):", "from aocd import lines rows = len(lines) cols = len(lines[0]) _map = {}", "== '>': east.append((x,y)) elif sc == 'v': south.append((x,y)) _map[(x,y)] = sc for step", "sc for step in count(1): east_move = False south_move = False new_map =", "count from aocd import lines rows = len(lines) cols = len(lines[0]) _map =", "= x + 1 if x + 1 < cols else 0 if", "[] for y, line in enumerate(lines): for x, sc in enumerate(line): if sc", "1 < cols else 0 if _map.get((nx,y), '.') == '.': new_map[(nx,y)] = '>'", "sc == 'v': south.append((x,y)) _map[(x,y)] = sc for step in count(1): east_move =", "{} new_east = [] for sc in east: x, y = sc nx", "new_south = [] for sc in south: x, y = sc ny =", "x, sc in enumerate(line): if sc == '>': east.append((x,y)) elif sc == 'v':", "in south: x, y = sc ny = y + 1 if y", "if new_map.get((x,ny), '.') == '.' and _map.get((x,ny), '.') != 'v': new_map[(x,ny)] = 'v'", "line in enumerate(lines): for x, sc in enumerate(line): if sc == '>': east.append((x,y))", "new_map.get((x,ny), '.') == '.' 
and _map.get((x,ny), '.') != 'v': new_map[(x,ny)] = 'v' new_south.append((x,", "= new_east new_south = [] for sc in south: x, y = sc", "'.') != 'v': new_map[(x,ny)] = 'v' new_south.append((x, ny)) south_move = True else: new_map[(x,y)]", "from itertools import count from aocd import lines rows = len(lines) cols =", "'v' new_south.append((x, ny)) south_move = True else: new_map[(x,y)] = 'v' new_south.append((x,y)) south =", "x + 1 < cols else 0 if _map.get((nx,y), '.') == '.': new_map[(nx,y)]", "= [] for y, line in enumerate(lines): for x, sc in enumerate(line): if", "[] for sc in south: x, y = sc ny = y +", "sc nx = x + 1 if x + 1 < cols else", "= len(lines) cols = len(lines[0]) _map = {} east = [] south =", "True else: new_map[(x,y)] = '>' new_east.append((x,y)) east = new_east new_south = [] for", "_map.get((x,ny), '.') != 'v': new_map[(x,ny)] = 'v' new_south.append((x, ny)) south_move = True else:", "else: new_map[(x,y)] = 'v' new_south.append((x,y)) south = new_south _map = new_map if not", "= [] for sc in south: x, y = sc ny = y", "and _map.get((x,ny), '.') != 'v': new_map[(x,ny)] = 'v' new_south.append((x, ny)) south_move = True", "aocd import lines rows = len(lines) cols = len(lines[0]) _map = {} east", "south = [] for y, line in enumerate(lines): for x, sc in enumerate(line):", "count(1): east_move = False south_move = False new_map = {} new_east = []", "sc == '>': east.append((x,y)) elif sc == 'v': south.append((x,y)) _map[(x,y)] = sc for", "0 if new_map.get((x,ny), '.') == '.' 
and _map.get((x,ny), '.') != 'v': new_map[(x,ny)] =", "for y, line in enumerate(lines): for x, sc in enumerate(line): if sc ==", "south = new_south _map = new_map if not east_move and not south_move: break", "+ 1 if y + 1 < rows else 0 if new_map.get((x,ny), '.')", "= 'v' new_south.append((x,y)) south = new_south _map = new_map if not east_move and", "if x + 1 < cols else 0 if _map.get((nx,y), '.') == '.':", "sc in south: x, y = sc ny = y + 1 if", "new_east = [] for sc in east: x, y = sc nx =", "import lines rows = len(lines) cols = len(lines[0]) _map = {} east =", "new_map[(x,y)] = 'v' new_south.append((x,y)) south = new_south _map = new_map if not east_move", "sc in east: x, y = sc nx = x + 1 if", "'v' new_south.append((x,y)) south = new_south _map = new_map if not east_move and not", "= sc ny = y + 1 if y + 1 < rows", "new_east new_south = [] for sc in south: x, y = sc ny", "1 if x + 1 < cols else 0 if _map.get((nx,y), '.') ==", "'.': new_map[(nx,y)] = '>' new_east.append((nx, y)) east_move = True else: new_map[(x,y)] = '>'", "= [] for sc in east: x, y = sc nx = x", "for step in count(1): east_move = False south_move = False new_map = {}", "ny = y + 1 if y + 1 < rows else 0", "= '>' new_east.append((x,y)) east = new_east new_south = [] for sc in south:", "south.append((x,y)) _map[(x,y)] = sc for step in count(1): east_move = False south_move =", "east_move = True else: new_map[(x,y)] = '>' new_east.append((x,y)) east = new_east new_south =", "step in count(1): east_move = False south_move = False new_map = {} new_east", "x, y = sc ny = y + 1 if y + 1", "y + 1 < rows else 0 if new_map.get((x,ny), '.') == '.' and", "1 < rows else 0 if new_map.get((x,ny), '.') == '.' 
and _map.get((x,ny), '.')", "cols else 0 if _map.get((nx,y), '.') == '.': new_map[(nx,y)] = '>' new_east.append((nx, y))", "in enumerate(line): if sc == '>': east.append((x,y)) elif sc == 'v': south.append((x,y)) _map[(x,y)]", "y)) east_move = True else: new_map[(x,y)] = '>' new_east.append((x,y)) east = new_east new_south", "rows else 0 if new_map.get((x,ny), '.') == '.' and _map.get((x,ny), '.') != 'v':", "= {} new_east = [] for sc in east: x, y = sc", "y = sc nx = x + 1 if x + 1 <", "= new_south _map = new_map if not east_move and not south_move: break print('Part", "y = sc ny = y + 1 if y + 1 <", "x + 1 if x + 1 < cols else 0 if _map.get((nx,y),", "1 if y + 1 < rows else 0 if new_map.get((x,ny), '.') ==", "new_map[(nx,y)] = '>' new_east.append((nx, y)) east_move = True else: new_map[(x,y)] = '>' new_east.append((x,y))", "y + 1 if y + 1 < rows else 0 if new_map.get((x,ny),", "itertools import count from aocd import lines rows = len(lines) cols = len(lines[0])", "for sc in east: x, y = sc nx = x + 1", "< cols else 0 if _map.get((nx,y), '.') == '.': new_map[(nx,y)] = '>' new_east.append((nx,", "y, line in enumerate(lines): for x, sc in enumerate(line): if sc == '>':", "else: new_map[(x,y)] = '>' new_east.append((x,y)) east = new_east new_south = [] for sc", "east: x, y = sc nx = x + 1 if x +", "new_east.append((x,y)) east = new_east new_south = [] for sc in south: x, y", "enumerate(lines): for x, sc in enumerate(line): if sc == '>': east.append((x,y)) elif sc", "east_move = False south_move = False new_map = {} new_east = [] for", "_map[(x,y)] = sc for step in count(1): east_move = False south_move = False", "len(lines) cols = len(lines[0]) _map = {} east = [] south = []", "south_move = False new_map = {} new_east = [] for sc in east:", "= True else: new_map[(x,y)] = '>' new_east.append((x,y)) east = new_east new_south = []", "[] for sc in east: x, y = sc nx = x +", "= False new_map = {} new_east = [] for sc in east: x,", "+ 1 if x + 1 < cols else 0 if 
_map.get((nx,y), '.')", "_map = new_map if not east_move and not south_move: break print('Part 1:', step)", "= 'v' new_south.append((x, ny)) south_move = True else: new_map[(x,y)] = 'v' new_south.append((x,y)) south", "= y + 1 if y + 1 < rows else 0 if", "else 0 if new_map.get((x,ny), '.') == '.' and _map.get((x,ny), '.') != 'v': new_map[(x,ny)]", "'>': east.append((x,y)) elif sc == 'v': south.append((x,y)) _map[(x,y)] = sc for step in", "'v': south.append((x,y)) _map[(x,y)] = sc for step in count(1): east_move = False south_move", "if sc == '>': east.append((x,y)) elif sc == 'v': south.append((x,y)) _map[(x,y)] = sc", "'v': new_map[(x,ny)] = 'v' new_south.append((x, ny)) south_move = True else: new_map[(x,y)] = 'v'", "False new_map = {} new_east = [] for sc in east: x, y", "if y + 1 < rows else 0 if new_map.get((x,ny), '.') == '.'", "if _map.get((nx,y), '.') == '.': new_map[(nx,y)] = '>' new_east.append((nx, y)) east_move = True", "= sc nx = x + 1 if x + 1 < cols", "= '>' new_east.append((nx, y)) east_move = True else: new_map[(x,y)] = '>' new_east.append((x,y)) east", "< rows else 0 if new_map.get((x,ny), '.') == '.' and _map.get((x,ny), '.') !=", "_map.get((nx,y), '.') == '.': new_map[(nx,y)] = '>' new_east.append((nx, y)) east_move = True else:", "for x, sc in enumerate(line): if sc == '>': east.append((x,y)) elif sc ==", "south: x, y = sc ny = y + 1 if y +", "True else: new_map[(x,y)] = 'v' new_south.append((x,y)) south = new_south _map = new_map if", "lines rows = len(lines) cols = len(lines[0]) _map = {} east = []", "east = [] south = [] for y, line in enumerate(lines): for x,", "= [] south = [] for y, line in enumerate(lines): for x, sc", "elif sc == 'v': south.append((x,y)) _map[(x,y)] = sc for step in count(1): east_move", "cols = len(lines[0]) _map = {} east = [] south = [] for", "'>' new_east.append((x,y)) east = new_east new_south = [] for sc in south: x,", "== '.' 
and _map.get((x,ny), '.') != 'v': new_map[(x,ny)] = 'v' new_south.append((x, ny)) south_move", "sc in enumerate(line): if sc == '>': east.append((x,y)) elif sc == 'v': south.append((x,y))", "+ 1 < rows else 0 if new_map.get((x,ny), '.') == '.' and _map.get((x,ny),", "!= 'v': new_map[(x,ny)] = 'v' new_south.append((x, ny)) south_move = True else: new_map[(x,y)] =", "= True else: new_map[(x,y)] = 'v' new_south.append((x,y)) south = new_south _map = new_map", "south_move = True else: new_map[(x,y)] = 'v' new_south.append((x,y)) south = new_south _map =", "0 if _map.get((nx,y), '.') == '.': new_map[(nx,y)] = '>' new_east.append((nx, y)) east_move =", "= False south_move = False new_map = {} new_east = [] for sc", "'.' and _map.get((x,ny), '.') != 'v': new_map[(x,ny)] = 'v' new_south.append((x, ny)) south_move =", "ny)) south_move = True else: new_map[(x,y)] = 'v' new_south.append((x,y)) south = new_south _map", "= sc for step in count(1): east_move = False south_move = False new_map", "False south_move = False new_map = {} new_east = [] for sc in", "enumerate(line): if sc == '>': east.append((x,y)) elif sc == 'v': south.append((x,y)) _map[(x,y)] =", "new_map[(x,ny)] = 'v' new_south.append((x, ny)) south_move = True else: new_map[(x,y)] = 'v' new_south.append((x,y))", "'>' new_east.append((nx, y)) east_move = True else: new_map[(x,y)] = '>' new_east.append((x,y)) east =", "len(lines[0]) _map = {} east = [] south = [] for y, line" ]
[ "mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return results def find_qpo(self, func, ain, fitmethod='constbfgs', nchain=10,", "the np.sum of residuals: \" + str(p_srat) + \" +/- \" + str(psrat_err))", "# print('popt: ' + str(fitpars['popt'])) ## which posterior do I need to use?", "outlier', fontsize=12) plt.subplot(2, 2, 4) n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\",", "corresponding value of the T_R statistic at frequency f = ' + str(", "= float(len([x for x in sim_deviance if x > fitpars1['deviance']])) / float(len(sim_deviance)) p_ksp", "= \"model1\" func2name = \"model2\" ### step 1: fit both models to observation", "str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_deviance': print(\"Bayesian p-value", "\" +/- \" + str(probs[x][1])) elif x == 'p_maxpow': if \"fitpars\" in probs.keys():", "walkers to use in MCMC. For Metropolis-Hastings, use ~10-20 and many samples For", "patches = ax.hist(pars[plotkeys[i]][0], 30) ax.vlines(pars[plotkeys[i]][0], 0.0, 0.8 * max(n), lw=4) ax.figtext(pars[plotkeys[i]][0] + 0.01", "the number of parameters n taken by func2. 
fitmethod : string, optional, default", "binned frequencies bintemplate = func(fitpars['bindict']['bin' + str(b)].freq, *fitpars['popt']) resfile(\"bintemplate[0]: \" + str(bintemplate[0])) ##", "bmaxpow = np.array([x[\"bmax\" + str(b)] for x in bindicts]) maxpows_all[\"bin\" + str(b)] =", "+ str(len(sim_bmaxpow_sort))) resfile('ninetyfiveperlim: ' + str(ninetyfiveperlim)) bmaxpow_ul = sim_bmaxpow_sort[ninetyfiveperlim] bindict['bmax' + str(b) +", "if x > fitpars1['sobs']])) / float(len(sim_srat)) resfile('simulated srat: ' + str(sim_srat)) resfile('observed srat:", "+ str(probs[x][1]) + \"\\n\") file.write( \"Upper limit for highest [unsmoothed] data/model outlier: \"", "possible to build up a posterior distribution for the likelihood ratios and compute", "[], [], [], [], [], [], [], [], [] bmax = int(self.ps.freq[-1] /", "p_srat = float(len([x for x in sim_srat if x > fitpars['sobs']])) / float(len(sim_srat))", "for maximum power P_max = \" + str(p_s3max) + \" +/- \" +", "p_s11max = float(len([x for x in sim_s11max if x > fitpars['s11max']])) / float(len(sim_s11max))", "20, 30, 50, 70, 100, 200, 300, 500, 700, 1000] binlist = [r", "in range(len(postpars['postmean'])): file.write(\"theta[\" + str(i) + \"] \\t \" + str(postpars['postmean'][i]) + \"\\t\"", "plt.vlines(fitpars['s5max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (5) data/model outlier', fontsize=12) plt.subplot(2, 2, 4)", "5% \\t\\t 95% \\n\") print(\"---------------------------------------------\\n\") for i in range(len(postpars['postmean'])): print(\"theta[\" + str(i) +", "func1, par1, func2, par2, fitmethod='bfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, use_emcee=True, parname=None, noise1=-1, noise2=-1,", "for x in sim_lrt if x > obslrt])) / float(len(sim_lrt)) p_srat = float(len([x", "if searchfreq is None: searchfreq = [40.0, 70.0, 100.0, 300.0, 500.0, 1000.0] ##", "- 1 bpow = binpowers[bind] brms = np.sqrt(bpow * b * self.ps.df) resfile('The", "fiveperlim # print('popt4: ' 
+ str(fitpars['popt'])) bindicts = [x[\"bindict\"] for x in sim_pars_all]", "i.e. when only 0.05*nsim simulations are higher than this ### note: sometimes simulations", "in bmaxpow if x > fitpars['bindict'][\"bmax\" + str(b)]])) / float( len(bmaxpow)) bindict[\"p_maxpow\" +", "test: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x ==", "outlier at frequency F=\" + str( probs[\"fitpars\"][\"s5maxfreq\"]) + \"Hz with power P=\" +", "### plot the periodogram and best fit models psfit.plotfits(fitpars1, fitpars2, namestr=self.namestr, log=True) if", "binps = fitpars['bindict']['bin' + str(b)] bmaxpow = np.array([x[\"bmax\" + str(b)] for x in", "in sim_pars_all] ### get out binned powers: maxpows_all = {} binprob = {}", "is to be searched for QPOs namestr: string, optional, default \"test\" The string", "[sim_s11max, p_s11max, ps11max_err], 'p_merit': [p_merit, pmerit_err], 'p_srat': [p_srat, psrat_err], 'p_deviance': [p_deviance, pdeviance_err], 'fitpars':", "30, 50, 70, 100, 200, 300, 500, 700, 1000] binlist = [r for", "optpars['sobs']])) / float(len(sim_srat)) print(\"p(LRT) = \" + str(p_lrt)) # print(\"LRT(obs) = \" +", "in bindicts]) maxpows_all[\"bin\" + str(b)] = bmaxpow bindict['sim_bmaxpow' + str(b)] = bmaxpow p_bmaxpow", "from lowest to highest sim_maxpow_sort = np.msort(sim_maxpow) sim_s3max_sort = np.msort(sim_s3max) sim_s5max_sort = np.msort(sim_s5max)", "bindict['sim_bmaxpow' + str(b)] = bmaxpow p_bmaxpow = float(len([x for x in bmaxpow if", "* (1.0 - p_ksp) / float(len(sim_ksp))) pmerit_err = np.sqrt(p_merit * (1.0 - p_merit)", "= float(len([x for x in sim_srat if x > fitpars['sobs']])) / float(len(sim_srat)) p_s3max", "power P=\" + str(probs[\"fitpars\"][\"s11max\"])) print(\"Bayesian p-value for the highest [11 bin smoothed] data/model", "\\t mean \\t\\t sd \\t\\t 5% \\t\\t 95% \\n\") print(\"---------------------------------------------\\n\") for i in", "default True If True, several diagnostic plots will be saved to disk m:", "periodogram. 
Needs to be a function that takes an array of frequencies and", "an array of frequencies and n parameters, and returns an array of model", "resfile(\"Analysis of Burst failed! Returning ...\") return False, False, False else: ### Step", "data/model outlier: \" + str(summary['s3max_ul']) + \"\\n\") elif x == 'p_s5max': file.write(\"Bayesian p-value", "parname=parname, check_conv=True, namestr=self.namestr, use_emcee=True, plot=self.plot, printobj=resfile, m=self.m) ### Step 3: create fake periodograms", "fitmethod : string, optional, default \"bfgs\" Choose the optimization algorithm used when minimizing", "approach using LRTs # # # TO DO: Need to add smoothing for", "qpopars = psfit.find_qpo(func, ain, plot=True, obs=True, plotname=self.namestr + '_loglikes') ### simulate lots of", "\" + str(postpars[\"acor\"]) + \"\\n\") except KeyError: file.write(\"Module Acor not found. Cannot compute", "lrt > 20: # fitfake.plotfits(sim_pars1, sim_pars2, namestr=self.namestr+'_'+str(i)) sim_lrt.append(lrt) sim_deviance.append(sim_pars1['deviance']) sim_ksp.append(sim_pars1['ksp']) sim_maxpow.append(sim_pars1['maxpow']) sim_merit.append(sim_pars1['merit']) sim_fpeak.append(sim_pars1['maxfreq'])", "* b * self.ps.nphots))) resfile('The upper limit on the rms amplitude at '", "class defines a Bayes object that can: # - pick between two models", "' + str(fitpars['popt'])) ## which posterior do I need to use? 
if self.m", "on maximum signal power P_max_ul = ' + str(s3max_ul)) resfile(\"Bayesian p-value for maximum", "\" +/- \" + str(pdeviance_err)) resfile(\"Bayesian p-value for KS test: \" + str(p_ksp)", "x == 'p_maxpow': file.write(\"Bayesian p-value for the highest [unsmoothed] data/model outlier: \" +", "frequency F=\" + str( probs[\"fitpars\"][\"s11maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"s11max\"])) print(\"Bayesian", "= self.namestr + \"_choosenoisemodel.dat\" resfile = utils.TwoPrint(resfilename) ### make strings for function names", "\\ sim_maxpow, sim_merit, sim_y0, sim_s3max, sim_s5max, sim_s11max = [], [], [], [], [],", "fake periodograms: for i, x in enumerate(fakeper): try: # print('popt' + str(i) +", "float(len(sim_ksp))) pksp_err = np.sqrt(p_ksp * (1.0 - p_ksp) / float(len(sim_ksp))) pmerit_err = np.sqrt(p_merit", "### Like everything else, this is n-trial corrected! # print('len(bmaxpow_sort) : ' +", "for func using MCMC, and create fake periodograms from samples of the posterior.", "are: \" + str(postpars[\"acor\"])) except KeyError: print(\"Module Acor not found. 
Cannot compute autocorrelation", "x > fitpars['sobs']])) / float(len(sim_srat)) p_s3max = float(len([x for x in sim_s3max if", "sim_ksp, sim_fpeak, sim_srat, \\ sim_maxpow, sim_merit, sim_y0, sim_s3max, sim_s5max, sim_s11max = [], [],", "probs.keys(): print(\"Highest [unsmoothed] data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"maxfreq\"]) + \"Hz", "plt.subplot(2, 2, 2) n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin,", "p-value for Likelihood Ratio: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1]))", "str(b)] = bmaxpow bindict['sim_bmaxpow' + str(b)] = bmaxpow p_bmaxpow = float(len([x for x", "x in sim_ksp if x > fitpars['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for", "' is 2I/S = ' + str(fitpars['bindict'][\"bmax\" + str(b)])) resfile('The upper limit on", "False, use Metropolis-Hastings. \"\"\" ## the file name where the output will be", "+ str( probs[\"fitpars\"][\"s11maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"s11max\"])) print(\"Bayesian p-value for", "/ 1.2, max(25, fitpars['s5max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s5max'], 0.0, max(n),", "' + str(fitpars['bindict'][\"bmax\" + str(b)])) resfile('The upper limit on the T_R statistic is", "for Merit function: \" + str(p_merit) + \" +/- \" + str(pmerit_err)) print(\"Bayesian", "is n-trial corrected! 
maxpow_ul = sim_maxpow_sort[ninetyfiveperlim] ### Step 6: Compute errors of Bayesian", "run QPO search on each and return likelihood ratios parameters for each for", "+/- \" + str(pksp_err)) resfile(\"Bayesian p-value for Merit function: \" + str(p_merit) +", "in bins[:nbins]: binps = fitpars['bindict']['bin' + str(b)] bmaxpow = np.array([x[\"bmax\" + str(b)] for", "return psfit, fakeper, summary def find_periodicity(self, func, par, fitmethod='bfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0,", "normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s3max']) / 1.2, max(25, fitpars['s3max'] *", "freedom. Find the highest power in the residuals and its frequency. Sample the", "that can: # - pick between two models using likelihood ratio tests #", "min(min(bins), fitpars['s11max']) / 1.2, max(25, fitpars['s3max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s11max'],", "background model to *every* frequency. NOTE: I rarely ever use this because it's", "fitpars['bindict']['bin' + str(b)] bmaxpow = np.array([x[\"bmax\" + str(b)] for x in bindicts]) maxpows_all[\"bin\"", "index for the noise parameter in func. In the pre-defined models, this index", "+ str(p_deviance) + \" +/- \" + str(pdeviance_err)) print(\"Bayesian p-value for KS test:", "default \"bfgs\" Choose the optimization algorithm used when minimizing the -log-likelihood. 
Choices are", "tcov=fitpars1['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=use_emcee, plot=self.plot, printobj=resfile, m=self.m) ### Step", "print_function import matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator try: import cPickle as", "self.namestr try: keys = summary.keys() except AttributeError: raise Exception(\"Summary must be a dictionary!\")", "limit for highest [unsmoothed] data/model outlier: \" + str(summary['maxpow_ul']) + \"\\n\") elif x", "in mle.py, but the default (bfgs) should be sufficient for most applications. nchain", "str(probs[x][1])) elif x == 'p_s11max': if \"fitpars\" in probs.keys(): print(\"Highest [11 bin smoothed]", "'fit') # if lrt > 20: # fitfake.plotfits(sim_pars1, sim_pars2, namestr=self.namestr+'_'+str(i)) sim_lrt.append(lrt) sim_deviance.append(sim_pars1['deviance']) sim_ksp.append(sim_pars1['ksp'])", "a model selection approach using LRTs # # # TO DO: Need to", "use the emcee package for running MCMC. If False, use Metropolis-Hastings. parname :", "limit on maximum signal power P_max_ul = ' + str(s5max_ul)) resfile(\"Bayesian p-value for", "* (self.ps.freq[1] - self.ps.freq[0]))) bins = [1, 3, 5, 7, 10, 15, 20,", "parameter names for plotting noise1, noise2 : int, optional, default -1 The index", "par, obs=True, noise=noise, m=self.m) bindict = fitpars['bindict'] # print('popt: ' + str(fitpars['popt'])) ##", "== 'p_ksp': print(\"Bayesian p-value for KS test: \" + str(probs[x][0]) + \" +/-", "+ str(i) + 'd : ' + str(fitpars['popt'])) # print('popt3: ' + str(fitpars['popt']))", "pre-defined models, this index is *always* -1. 
use_emcee : boolean, optional, default True", "pick between two models using likelihood ratio tests # - find periodicities by", "= float(len([x for x in sim_maxpow if x > fitpars1['maxpow']])) / float(len(sim_maxpow)) p_deviance", "\"\\n\") for x in probs.keys(): if x == 'p_lrt': print(\"Bayesian p-value for Likelihood", "I_j depending on frequency binpowers = bmaxpow_ul * bintemplate / 2.0 - bintemplate", "float(len(sim_srat)) print(\"p(LRT) = \" + str(p_lrt)) # print(\"LRT(obs) = \" + str(obslrt)) #", "assert isinstance(ps, powerspectrum.PowerSpectrum), \"ps must be of type powerspectrum.PowerSpectrum!\" self.ps = ps self.namestr", "str(fitpars['popt'])) ### Step 4: Fit fake periodograms: for i, x in enumerate(fakeper): try:", "use Metropolis-Hastings. parname : list, optional, default None Include a list of strings", "parameter setting a constant background level, and this parameter should be last! par", "identify this periodogram when saving output (text files and plots) plot: boolean, optional,", "number of simulations to use when computing the posterior distribution of the likelihood", "TO DO: Need to add smoothing for picking out narrow signals # #", "noise1=noise1, noise2=noise2, m=self.m) # resfile('Fitting of fake periodogram ' + str(i) + '", "str(p_s5max) + \" +/- \" + str(ps5max_err)) # resfile('Upper limit on maximum signal", "be sufficient for most applications. 
nchain : int, optional, default 10 The number", "# print(\"Deviance(obs) = \" + str(fitpars1['deviance'])) # print(\"mean(sim_deviance) = \" + str(np.mean(sim_deviance))) print(\"KSP(obs)", "up Markov Chain Monte Carlo Simulations ### of model 1: mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq,", "bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s5max'])", "for 2*I/S --> multiply by S to get powers for each frequency ###", "\" + str(probs[x][1])) elif x == 'p_maxpow': if \"fitpars\" in probs.keys(): print(\"Highest [unsmoothed]", "import matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator try: import cPickle as pickle", "'p_merit': print(\"Bayesian p-value for Merit function: \" + str(probs[x][0]) + \" +/- \"", "optional, default -1 The index for the noise parameter in func. In the", "+ \"\\n\") elif x == 'p_merit': file.write( \"Bayesian p-value for Merit function: \"", "x == 'p_srat': print(\"Bayesian p-value for the sum of residuals: \" + str(probs[x][0])", "value of the T_R statistic at frequency f = ' + str( fitpars[\"bindict\"][\"bmaxfreq\"", "+/- \" + str(pmaxpow_err)) resfile(\"Bayesian p-value for deviance D = \" + str(p_deviance)", "\" +/- \" + str(probs[x][1]) + \"\\n\") elif x == 'p_srat': file.write(\"Bayesian p-value", "color='navy') plt.title('smoothed (5) data/model outlier', fontsize=12) plt.subplot(2, 2, 4) n, bins, patches =", "ps11max_err], 'p_merit': [p_merit, pmerit_err], 'p_srat': [p_srat, psrat_err], 'p_deviance': [p_deviance, pdeviance_err], 'fitpars': fitpars, \"postmean\":", "ain, fitmethod='constbfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, parname=None, plotstr=None, use_emcee=True): \"\"\" Find QPOs by", "data', fontsize=12) plt.subplot(2, 2, 2) n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\",", "value for Parameter \" + str(i) + \" is \" + str(x)) ###", "print(\"Highest [11 bin smoothed] 
data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"s11maxfreq\"]) +", "covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=True, plot=self.plot, printobj=resfile, m=self.m) ### Step 3:", "file.write(\"---------------------------------------------\\n\") for i in range(len(postpars['postmean'])): file.write(\"theta[\" + str(i) + \"] \\t \" +", "\"p_srat\": [p_srat, psrat_err], \"postmean\": mcobs.mean, \"posterr\": mcobs.std, \"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor,", "narrow signals # # # class Bayes(object): \"\"\" Bayesian time series analysis This", "the MCMC step. Used only in Metropolis-Hastings. use_emcee : boolean, optional, default True", "bc) - 1 bpow = binpowers[bind] brms = np.sqrt(bpow * b * self.ps.df)", "1.2, max(25, fitpars['s3max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s3max'], 0.0, max(n), lw=2,", "summary: :param namestr: :return: \"\"\" if not namestr: namestr = self.namestr try: keys", "optional, default 10 The number of chains or walkers to use in MCMC.", "float(len([x for x in sim_maxpow if x > fitpars1['maxpow']])) / float(len(sim_maxpow)) p_deviance =", "parameter for the MCMC step. Used only in Metropolis-Hastings. parname : list, optional,", "resfile('Upper limit on maximum signal power P_max_ul = ' + str(s5max_ul)) resfile(\"Bayesian p-value", "index for the noise parameter in func1 and func2. In the pre-defined models,", "+ str(fitpars1['ksp'])) resfile(\"mean(sim_ksp) = \" + str(np.mean(sim_ksp))) resfile(\"Merit(obs) = \" + str(fitpars1['merit'])) resfile(\"mean(sim_merit)", "nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=True, plot=self.plot, printobj=resfile, m=self.m) ### Step 3: create fake", "\"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return psfit, fakeper, summary def", "model powers. 
The function should include a parameter setting a constant background level,", "'_loglikes') ### simulate lots of realizations of the broadband noise model from MCMCs", "Ratio: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1]) + \"\\n\") elif", "\" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_deviance':", "of fake periodogram ' + str(i) + ' failed! Returning ...') # return", "I need to use? if self.m == 1: lpost = posterior.PerPosterior(self.ps, func) else:", "resfile('Upper limit on maximum signal power P_max_ul = ' + str(maxpow_ul)) resfile(\"Bayesian p-value", "plt.vlines(fitpars['s11max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (11) data', fontsize=12) plt.savefig(self.namestr + '_maxpow.png', format='png')", "{} for b in bins[:nbins]: binps = fitpars['bindict']['bin' + str(b)] bmaxpow = np.array([x[\"bmax\"", "as np from src.SpectralAnalysis import utils from src.SpectralAnalysis import powerspectrum from src.SpectralAnalysis import", "str(b)] = p_bmaxpow bmaxpow_err = np.sqrt(p_bmaxpow * (1.0 - p_bmaxpow) / float(len(bmaxpow))) bindict['p_maxpow'", "level, and this parameter should be last! 
par2 : {list, array-like} Input guesses", "+ str(i) + 'c : ' + str(fitpars['popt'])) sim_pars_all.append(sim_pars) sim_deviance.append(sim_pars['deviance']) sim_ksp.append(sim_pars['ksp']) sim_maxpow.append(sim_pars['maxpow']) sim_merit.append(sim_pars['merit'])", "individual quantities p_maxpow = float(len([x for x in sim_maxpow if x > fitpars['maxpow']]))", "+ \"Hz with power P=\" + str(probs[\"fitpars\"][\"maxpow\"])) print(\"Bayesian p-value for the highest [unsmoothed]", "= \" + str(np.mean(sim_merit))) print(\"Srat(obs) = \" + str(optpars['sobs'])) print(\"mean(sim_srat) = \" +", "log=True) if self.m == 1: lpost = posterior.PerPosterior(self.ps, func1) else: lpost = posterior.StackPerPosterior(self.ps,", "str(x) + \"\\n\") ### print posterior summary of parameters: file.write(\"-- Posterior Summary of", "\"\\n\") elif x == 'p_maxpow': file.write(\"Bayesian p-value for the highest [unsmoothed] data/model outlier:", "\" +/- \" + str(probs[x][1])) elif x == 'p_s3max': if \"fitpars\" in probs.keys():", "MCMC step. Used only in Metropolis-Hastings. use_emcee : boolean, optional, default True If", "estimate, divide out the MAP model and find the highest power in that", "- bintemplate ## now compute rms amplitude at 40, 70, 100 and 300", "+ \" +/- \" + str(ps11max_err)) # resfile('Upper limit on maximum signal power", "two models using likelihood ratio tests # - find periodicities by picking out", "use Metropolis-Hastings. \"\"\" if plotstr == None: plotstr = self.namestr funcname = str(func).split()[1]", "float(len([x for x in sim_deviance if x > fitpars['deviance']])) / float(len(sim_deviance)) p_ksp =", "for highest [unsmoothed] data/model outlier: \" + str(summary['maxpow_ul']) + \"\\n\") elif x ==", "choice of different minimization algorithms. 
Default uses BFGS, which is pretty robust for", "= copy.copy(fitpars['popt']) # print('popt2: ' + str(fitpars['popt'])) ### Step 4: Fit fake periodograms:", "+ str( probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_s5max':", "False else: ### Step 5: Compute Bayesian posterior probabilities of individual quantities p_maxpow", "len(binlist) / 4 sain = copy.copy(fitpars['popt']) # print('popt2: ' + str(fitpars['popt'])) ### Step", "print('popt' + str(i) + 'd : ' + str(fitpars['popt'])) # print('popt3: ' +", "str(p_lrt) + \" +/- \" + str(plrt_err)) if self.plot: n, bins, patches =", "+ str(pksp_err)) resfile(\"Bayesian p-value for Merit function: \" + str(p_merit) + \" +/-", "compute a posterior predictive p-value that the data can be explained sufficiently with", "\" + str(p_s11max) + \" +/- \" + str(ps11max_err)) # resfile('Upper limit on", "\" + str(len(self.ps.ps))) ### step 1: fit model to observation psfit = mle.PerMaxLike(self.ps,", "from samples of the posterior. For each fake periodogram, find the MAP estimate,", "bind = np.searchsorted(binps.freq, bc) - 1 bpow = binpowers[bind] brms = np.sqrt(bpow *", "1.2, max(25, fitpars['s3max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s11max'], 0.0, max(n), lw=2,", "array must match the number of parameters k taken by func. 
fitmethod :", ": int, optional, default 10 The number of chains or walkers to use", "self.plot: n, bins, patches = plt.hist(sim_lrt, bins=100, normed=True, histtype='stepfilled') plt.vlines(obslrt, 0.0, 0.8 *", "Include a list of strings here to set parameter names for plotting noise1,", "# class Bayes(object): \"\"\" Bayesian time series analysis This class defines a Bayes", "sim_optpars, sim_qpopars, sim_deviance, sim_ksp, sim_merit, sim_srat = [], [], [], [], [], [],", "takes an array of frequencies and k parameters, and returns an array of", "+ \" +/- \" + str(probs[x][1])) elif x == 'p_merit': print(\"Bayesian p-value for", "\" + str(p_ksp) + \" +/- \" + str(pksp_err)) resfile(\"Bayesian p-value for Merit", "ain, obs=True, noise=-1, m=self.m) # print(\"<< --- len(self.ps beginning): \" + str(len(self.ps.ps))) if", "int(0.05 * len(sim_maxpow)) if fiveperlim == 0: resfile('Warning! Too few simulations to compute", "highest [unsmoothed] data/model outlier: \" + str(summary['s3max_ul']) + \"\\n\") elif x == 'p_s5max':", "in the sorted array where p_maxpow would be 0.05 ### i.e. when only", "* max(n), lw=4) ax.figtext(pars[plotkeys[i]][0] + 0.01 * pars[plotkeys[i]][0], 0.8 * n, \"p =", "most applications. nchain : int, optional, default 10 The number of chains or", "+ str(np.mean(sim_ksp))) print(\"Merit(obs) = \" + str(optpars['merit'])) print(\"mean(sim_merit) = \" + str(np.mean(sim_merit))) print(\"Srat(obs)", "+ str(b) + ' is rms = ' + str(brms)) bindict['bin' + str(b)", "seeing the maximum power in the data under the null hypothesis (no QPO).", "= mle.PerMaxLike(x, fitmethod=fitmethod, obs=False) lrt = fitfake.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m)", "periodograms out of MCMCs fakeper = mcobs.simulate_periodogram(nsim=nsim) sim_pars_all, sim_deviance, sim_ksp, sim_fpeak, sim_srat, \\", "float, optional, default 1.0 A tuning parameter for the MCMC step. 
Used only", "+ '_ul_%.4fHz' % bc] = brms else: continue ### Step 5: Compute Bayesian", "[5 bin smoothed] data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"s5maxfreq\"]) + \"Hz", "niter=5000, nsim=1000, covfactor=1.0, parname=None, noise=-1, use_emcee=True, searchfreq=None): \"\"\" Find periodicities in observed data", "print('popt' + str(i) + 'a : ' + str(fitpars['popt'])) fitfake = mle.PerMaxLike(x, fitmethod=fitmethod,", "1 ninetyfiveperlim = len(sim_maxpow) - fiveperlim # print('popt4: ' + str(fitpars['popt'])) bindicts =", "\" +/- \" + str(psrat_err)) resfile(\"Bayesian p-value for Likelihood Ratio: \" + str(p_lrt)", "+ str( probs[x][0]) + \" +/- \" + str(probs[x][1])) return def write_summary(self, summary,", "in enumerate(fakeper): try: # print('popt' + str(i) + 'a : ' + str(fitpars['popt']))", "and parameters in sim_lrt, sim_optpars, sim_qpopars, sim_deviance, sim_ksp, sim_merit, sim_srat = [], [],", ": int, optional, default 1000 The number of simulations to use when computing", "distribution of the likelihood ratio. Note that this also sets the maximum precision", "observation/set of fake periodograms # - search for QPOs via a model selection", "xmin, xmax = min(min(bins), fitpars['s11max']) / 1.2, max(25, fitpars['s3max'] * 1.2) plt.axis([xmin, xmax,", "this index is *always* -1. 
use_emcee : boolean, optional, default True If True", "sim_deviance, sim_ksp, sim_maxpow, sim_merit, sim_fpeak, sim_y0, sim_srat = [], [], [], [], [],", "> fitpars1['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x for x in sim_ksp if x", "sim_fpeak.append(sim_pars['maxfreq']) sim_y0.append(sim_pars['mfit'][sim_pars['maxind']]) sim_srat.append(sim_pars['sobs']) sim_s3max.append(sim_pars['s3max']) sim_s5max.append(sim_pars['s5max']) sim_s11max.append(sim_pars['s11max']) except KeyboardInterrupt: break # except: # print(\"Simulation", "np.msort(sim_s3max) sim_s5max_sort = np.msort(sim_s5max) sim_s11max_sort = np.msort(sim_s11max) ### note: this is the limit", "+ str(len(binpowers))) if searchfreq is None: searchfreq = [40.0, 70.0, 100.0, 300.0, 500.0,", "Merit function: \" + str(p_merit) + \" +/- \" + str(pmerit_err)) resfile(\"Bayesian p-value", "maximum power P_max = \" + str(p_s11max) + \" +/- \" + str(ps11max_err))", "empty lists for simulated quantities of interest: sim_lrt, sim_deviance, sim_ksp, sim_maxpow, sim_merit, sim_fpeak,", "'p_maxpow': file.write(\"Bayesian p-value for the highest [unsmoothed] data/model outlier: \" + str( probs[x][0])", "[unsmoothed] data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"maxfreq\"]) + \"Hz with power", "function Parametric model for the periodogram. Needs to be a function that takes", "' + str(sim_srat)) resfile('observed srat: ' + str(fitpars1['sobs'])) resfile(\"p(LRT) = \" + str(p_lrt))", "the emcee package for running MCMC. If False, use Metropolis-Hastings. \"\"\" if plotstr", "setting a constant background level, and this parameter should be last! par2 :", "\"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return psfit, fakeper, summary def find_periodicity(self, func,", "NOT USED! 
\"\"\" try: keys = summary.keys() except AttributeError: raise Exception(\"Summary must be", "pmaxpow_err = np.sqrt(p_maxpow * (1.0 - p_maxpow) / float(len(sim_ksp))) pdeviance_err = np.sqrt(p_deviance *", "+ 'fit') sim_pars2 = getattr(fitfake, func2name + 'fit') # if lrt > 20:", "+ \" is \" + str(x) + \"\\n\") ### print posterior summary of", "in probs.keys(): print(\"Highest [unsmoothed] data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"maxfreq\"]) +", "resfile(\"bintemplate[0]: \" + str(bintemplate[0])) ## then compute upper limits for powers I_j depending", "mcobs, 'p_maxpow': [sim_maxpow, p_maxpow, pmaxpow_err], 'maxpow_ul': maxpow_ul, 'p_s3max': [sim_s3max, p_s3max, ps3max_err], 'p_s5max': [sim_s5max,", "to set parameter names for plotting noise1, noise2 : int, optional, default -1", "QPOs by fitting a QPO + background model to *every* frequency. NOTE: I", "= np.sqrt(bpow * b * self.ps.df) resfile('The upper limit on the power at", "to highest sim_maxpow_sort = np.msort(sim_maxpow) sim_s3max_sort = np.msort(sim_s3max) sim_s5max_sort = np.msort(sim_s5max) sim_s11max_sort =", "f = ' + str( fitpars[\"bindict\"][\"bmaxfreq\" + str(b)]) + ' is 2I/S =", "If func1 and func2 differ in complexity, the less complex should be func1.", "mcobs.acor, \"acceptance\": mcobs.acceptance} return summary def print_summary(self, summary): \"\"\" Print a summary of", "str(probs[x][1])) elif x == 'p_merit': print(\"Bayesian p-value for Merit function: \" + str(probs[x][0])", "namestr: string, optional, default \"test\" The string that will be used to identify", "use? if self.m == 1: lpost = posterior.PerPosterior(self.ps, func) else: lpost = posterior.StackPerPosterior(self.ps,", "from each fit: for i, x in enumerate(fakeper): try: fitfake = mle.PerMaxLike(x, fitmethod=fitmethod,", "func : function Parametric model for the periodogram. 
Needs to be a function", "KS test: \" + str(p_ksp) + \" +/- \" + str(pksp_err)) print(\"Bayesian p-value", "# resfile('Upper limit on maximum signal power P_max_ul = ' + str(s3max_ul)) resfile(\"Bayesian", "data/model outlier: \" + str( probs[x][0]) + \" +/- \" + str(probs[x][1])) elif", "Merit function: \" + str(p_merit) + \" +/- \" + str(pmerit_err)) print(\"Bayesian p-value", "sim_y0.append(sim_pars['mfit'][sim_pars['maxind']]) sim_srat.append(sim_pars['sobs']) sim_s3max.append(sim_pars['s3max']) sim_s5max.append(sim_pars['s5max']) sim_s11max.append(sim_pars['s11max']) except KeyboardInterrupt: break # except: # print(\"Simulation failed!", "+/- \" + str(psrat_err)) print(\"Bayesian p-value for Likelihood Ratio: \" + str(p_lrt) +", "powerspectrum.PowerSpectrum), \"ps must be of type powerspectrum.PowerSpectrum!\" self.ps = ps self.namestr = namestr", "'p_s3max': file.write(\"Bayesian p-value for the highest [3 bin smoothed] data/model outlier: \" +", "= pars.keys() N = len(plotkeys) ### number of parameters fig = plt.figure(figsize=(2, N", "m=self.m) bindict = fitpars['bindict'] # print('popt: ' + str(fitpars['popt'])) ## which posterior do", "postpars = dict() ### sort out p-values and posterior distribution of parameters for", "plots will be saved to disk m: integer, optional, default 1 If the", "broadband noise model from MCMCs funcfake = mcobs.simulate_periodogram(nsim=nsim) ### empty lists to store", "\"\\n\") file.write( \"Upper limit for highest [unsmoothed] data/model outlier: \" + str(summary['maxpow_ul']) +", "p_s3max = float(len([x for x in sim_s3max if x > fitpars['s3max']])) / float(len(sim_s3max))", "+ str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_srat': print(\"Bayesian", "sim_ksp.append(sim_pars['ksp']) sim_maxpow.append(sim_pars['maxpow']) sim_merit.append(sim_pars['merit']) sim_fpeak.append(sim_pars['maxfreq']) sim_y0.append(sim_pars['mfit'][sim_pars['maxind']]) sim_srat.append(sim_pars['sobs']) 
sim_s3max.append(sim_pars['s3max']) sim_s5max.append(sim_pars['s5max']) sim_s11max.append(sim_pars['s11max']) except KeyboardInterrupt: break #", "plt.vlines(fitpars['maxpow'], 0.0, max(n), lw=2, color='navy') plt.title('unsmoothed data', fontsize=12) plt.subplot(2, 2, 2) n, bins,", "fitmethod='constbfgs', obs=False) slrt, soptpars, sqpopars = sim_psfit.find_qpo(func, ain, obs=False, plot=True, plotname=plotstr + '_sim'", "sim_ksp if x > fitpars1['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for x in", "= np.msort(bmaxpow) ### note: this is the limit for 2*I/S --> multiply by", "pmerit_err], \"p_srat\": [p_srat, psrat_err], \"postmean\": mcobs.mean, \"posterr\": mcobs.std, \"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat, \"acor\":", "+ str(psrat_err)) resfile(\"Bayesian p-value for Likelihood Ratio: \" + str(p_lrt) + \" +/-", "for Likelihood Ratio: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1]) +", "statistical distributions. Set m to the number of periodograms averaged to be sure", "+ str(brms)) bindict['bin' + str(b) + '_ul_%.4fHz' % bc] = brms else: continue", "func1name + 'fit') sim_pars2 = getattr(fitfake, func2name + 'fit') # if lrt >", "out parameters of interest from each fit: for i, x in enumerate(fakeper): try:", "+ str(fitpars['popt'])) ## which posterior do I need to use? 
if self.m ==", "default \"test\" The string that will be used to identify this periodogram when", "n, bins, patches = plt.hist(sim_maxpow, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins),", "for the highest [11 bin smoothed] data/model outlier: \" + str( probs[x][0]) +", "\"test\" The string that will be used to identify this periodogram when saving", "periodogram object that is to be searched for QPOs namestr: string, optional, default", "print posterior summary of parameters: print(\"-- Posterior Summary of Parameters: \\n\") print(\"parameter \\t", "fitmethod=fitmethod, obs=True) obslrt = psfit.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m) ### get", "str(probs[x][0]) + \" +/- \" + str(probs[x][1]) + \"\\n\") elif x == 'p_srat':", "data/model outlier: \" + str(summary['s11max_ul']) + \"\\n\") return def plot_posteriors(namestr='test', **pars): plotkeys =", "ax = fig.add_subplot(N / 2 + 1, 2, i) n, bins, patches =", ": ' + str(fitpars['popt'])) # print('popt3: ' + str(fitpars['popt'])) ### upper limit is", "mcobs.acceptance} return results def find_qpo(self, func, ain, fitmethod='constbfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, parname=None,", "try: file.write(\"The autocorrelation times are: \" + str(postpars[\"acor\"]) + \"\\n\") except KeyError: file.write(\"Module", "1.0 A tuning parameter for the MCMC step. Used only in Metropolis-Hastings. parname", "parname=parname, check_conv=True, namestr=self.namestr, use_emcee=True, plot=self.plot, m=self.m) ### find optimum QPO values for the", "n taken by func2. 
fitmethod : string, optional, default bfgs Allows the choice", "fitpars1['maxpow']])) / float(len(sim_maxpow)) p_deviance = float(len([x for x in sim_deviance if x >", "frequency F=\" + str( probs[\"fitpars\"][\"s5maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"s5max\"])) print(\"Bayesian", "individual quantities p_deviance = float(len([x for x in sim_deviance if x > optpars['deviance']]))", "topt=fitpars1['popt'], tcov=fitpars1['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=use_emcee, plot=self.plot, printobj=resfile, m=self.m) ###", "for KS test: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif", "elif x == 'p_s11max': if \"fitpars\" in probs.keys(): print(\"Highest [11 bin smoothed] data/model", "[], [], [], [], [], [] ### Step 4: Fit fake periodograms and", "self.ps.norm == 'variance': binpowers = binpowers * self.ps.n ** 2.0 / (self.ps.df *", "0.0, max(n), lw=2, color='navy') plt.title('unsmoothed data', fontsize=12) plt.subplot(2, 2, 2) n, bins, patches", "func2name + 'fit') # if lrt > 20: # fitfake.plotfits(sim_pars1, sim_pars2, namestr=self.namestr+'_'+str(i)) sim_lrt.append(lrt)", "if x > fitpars['s5max']])) / float(len(sim_s5max)) p_s11max = float(len([x for x in sim_s11max", "power in an observation/set of fake periodograms - search for QPOs via a", "to use the right distribution Attributes ---------- Examples -------- \"\"\" def __init__(self, ps,", "fitpars['s11max']])) / float(len(sim_s11max)) ### sort maximum powers from lowest to highest sim_maxpow_sort =", "# print(\"Simulation failed! Continuing ...\") # continue # print('popt' + str(i) + 'd", "print(\"Bayesian p-value for Merit function: \" + str(p_merit) + \" +/- \" +", "for the noise parameter in func. 
In the pre-defined models, this index is", "and read out parameters of interest from each fit: for i, x in", "/ float(len(sim_ksp))) pmerit_err = np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp))) plrt_err =", "copy import numpy as np from src.SpectralAnalysis import utils from src.SpectralAnalysis import powerspectrum", "writefile=True): \"\"\" Fit two models func1 and func2, compute the likelihood ratio at", "normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s5max']) / 1.2, max(25, fitpars['s5max'] *", "found. Cannot compute autocorrelation times for the parameters \\n\") for i, x in", "that this also sets the maximum precision of the posterior predictive p-value (for", "95% \\n\") print(\"---------------------------------------------\\n\") for i in range(len(postpars['postmean'])): print(\"theta[\" + str(i) + \"] \\t", "several individual periodograms (or bins), this changes the statistical distributions. Set m to", "in sim_ksp if x > fitpars1['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for x", "if x[:2] == 'p_': probs[x] = summary[x] else: postpars[x] = summary[x] print(\"The ensemble", "pdeviance_err], 'fitpars': fitpars, \"postmean\": mcobs.mean, \"posterr\": mcobs.std, \"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor,", "= bmaxpow p_bmaxpow = float(len([x for x in bmaxpow if x > fitpars['bindict'][\"bmax\"", "ax.figtext(pars[plotkeys[i]][0] + 0.01 * pars[plotkeys[i]][0], 0.8 * n, \"p = \" + str(pars[plotkeys[i]][1]))", "out binned powers: maxpows_all = {} binprob = {} for b in bins[:nbins]:", "NOT USED! 
:param summary: :param namestr: :return: \"\"\" if not namestr: namestr =", "namestr: :return: \"\"\" if not namestr: namestr = self.namestr try: keys = summary.keys()", "parname=None, noise=-1, use_emcee=True, searchfreq=None): \"\"\" Find periodicities in observed data and compute significance", "at ' + str(bc) + 'Hz for a binning of ' + str(b)", "this because it's really computationally expensive. Parameters ---------- func : function Parametric model", "np.sum of residuals: \" + str(p_srat) + \" +/- \" + str(psrat_err)) if", "power P_max_ul = ' + str(s3max_ul)) resfile(\"Bayesian p-value for maximum power P_max =", "is pretty robust for most purposes. nchain : int, optional, default 10 The", "plot=True, plotname=plotstr + '_sim' + str(simno) + '_qposearch') sim_lrt.append(slrt) sim_optpars.append(soptpars) sim_qpopars.append(sqpopars) sim_deviance.append(soptpars['deviance']) sim_ksp.append(soptpars['ksp'])", "lw=4, color='navy') plt.savefig(self.namestr + '_lrt.png', format='png') plt.close() summary = {\"p_lrt\": [p_lrt, plrt_err], \"p_maxpow\":", "...') # return psfit, fakeper, mcobs sim_pars1 = getattr(fitfake, func1name + 'fit') sim_pars2", "plot=True, m=1): assert isinstance(ps, powerspectrum.PowerSpectrum), \"ps must be of type powerspectrum.PowerSpectrum!\" self.ps =", "periodograms # - search for QPOs via a model selection approach using LRTs", "power in the sorted array where p_maxpow would be 0.05 ### i.e. 
when", "fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False) # print('popt' + str(i) + 'b : '", "(5) data/model outlier', fontsize=12) plt.subplot(2, 2, 4) n, bins, patches = plt.hist(sim_s3max, bins=100,", "parameters: file.write(\"-- Posterior Summary of Parameters: \\n\") file.write(\"parameter \\t mean \\t\\t sd \\t\\t", "+/- \" + str(probs[x][1]) + \"\\n\") elif x == 'p_deviance': file.write(\"Bayesian p-value for", "sim_deviance if x > fitpars1['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x for x in", "package for running MCMC. If False, use Metropolis-Hastings. \"\"\" ## the file name", "many as you can afford (~500) and fewer samples niter : int, optional,", "minimizing the -log-likelihood. Choices are listed in mle.py, but the default (bfgs) should", "elif x == 'p_deviance': file.write(\"Bayesian p-value for deviance D = \" + str(probs[x][0])", "xmax = min(min(bins), fitpars['maxpow']) / 1.2, max(25, fitpars['maxpow'] * 1.2) plt.axis([xmin, xmax, 0.0,", "by picking out the largest power in an observation/set of fake periodograms -", "\"\"\" Print a summary of the results. NOT USED! \"\"\" try: keys =", "70, 100 and 300 Hz ## first, convert powers into rms normalization, if", "import print_function import matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator try: import cPickle", "postpars[x] = summary[x] print(\"The ensemble acceptance rate is \" + str(postpars[\"acceptance\"]) + \"", "str(probs[x][1])) elif x == 'p_ksp': print(\"Bayesian p-value for KS test: \" + str(probs[x][0])", "--- len(self.ps beginning): \" + str(len(self.ps.ps))) ### step 1: fit model to observation", "+ str(b)]) + ' is 2I/S = ' + str(fitpars['bindict'][\"bmax\" + str(b)])) resfile('The", "\"\\n\") elif x == 'p_deviance': file.write(\"Bayesian p-value for deviance D = \" +", "default -1 The index for the noise parameter in func. 
In the pre-defined", "rms normalization, if they're not already if self.ps.norm == 'leahy': binpowers = binpowers", "bc] = brms else: continue ### Step 5: Compute Bayesian posterior probabilities of", "# print('len(binps.freq): ' + str(len(binps.freq))) # print('len(binpowers): ' + str(len(binpowers))) if searchfreq is", "int, optional, default 5000 Sets the length of the Markov chains. For Metropolis-Hastings,", "> fitpars1['maxpow']])) / float(len(sim_maxpow)) p_deviance = float(len([x for x in sim_deviance if x", "0.0, max(n), lw=2, color='navy') plt.title('smoothed (11) data', fontsize=12) plt.savefig(self.namestr + '_maxpow.png', format='png') plt.close()", "\"model1\" func2name = \"model2\" ### step 1: fit both models to observation and", "= psfit.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m) ### get out best fit", "+ str(pdeviance_err)) print(\"Bayesian p-value for KS test: \" + str(p_ksp) + \" +/-", "= \" + str(fitpars1['sobs'])) resfile(\"mean(sim_srat) = \" + str(np.mean(sim_srat))) ### Step 6: Compute", "+ \"\\n\") except KeyError: file.write(\"Module Acor not found. Cannot compute autocorrelation times for", "for time series # # This class defines a Bayes object that can:", "+ \"\\n\") return def plot_posteriors(namestr='test', **pars): plotkeys = pars.keys() N = len(plotkeys) ###", "plotting noise: int, optional, default -1 The index for the noise parameter in", "len(plotkeys) ### number of parameters fig = plt.figure(figsize=(2, N / 2 + 1))", "+/- \" + str(probs[x][1]) + \"\\n\") elif x == 'p_srat': file.write(\"Bayesian p-value for", "' + str( fitpars[\"bindict\"][\"bmaxfreq\" + str(b)]) + ' is 2I/S = ' +", "p_s3max) / float(len(sim_ksp))) ps5max_err = np.sqrt(p_s5max * (1.0 - p_s5max) / float(len(sim_ksp))) ps11max_err", "float(len(sim_ksp))) ### Display results on screen and make funky plots resfile(\"Bayesian p-value for", "list of strings here to set parameter names for plotting noise1, noise2 :", "failed! 
Returning ...') # return psfit, fakeper, mcobs sim_pars1 = getattr(fitfake, func1name +", "str( fitpars[\"bindict\"][\"bmaxfreq\" + str(b)]) + ' is 2I/S = ' + str(fitpars['bindict'][\"bmax\" +", "under the null hypothesis (no QPO). Parameters ---------- func : function Parametric model", "sim_lrt, sim_optpars, sim_qpopars, sim_deviance, sim_ksp, sim_merit, sim_srat = [], [], [], [], [],", "= ax.hist(pars[plotkeys[i]][0], 30) ax.vlines(pars[plotkeys[i]][0], 0.0, 0.8 * max(n), lw=4) ax.figtext(pars[plotkeys[i]][0] + 0.01 *", "sim_s11max = [], [], [], [], [], [], [], [], [], [], []", "sorted array where p_maxpow would be 0.05 ### i.e. when only 0.05*nsim simulations", "parameters\") for i, x in enumerate(postpars[\"rhat\"]): print(\"The $R_hat$ value for Parameter \" +", "## now compute rms amplitude at 40, 70, 100 and 300 Hz ##", "= float(len([x for x in sim_lrt if x > obslrt])) / float(len(sim_lrt)) p_srat", "plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s5max']) / 1.2, max(25,", "str(p_lrt)) resfile(\"KSP(obs) = \" + str(fitpars1['ksp'])) resfile(\"mean(sim_ksp) = \" + str(np.mean(sim_ksp))) resfile(\"Merit(obs) =", "be explained sufficiently with the simpler model. Parameters ---------- func1 : function Parametric", "(binps.freq[1] - binps.freq[0]): bind = np.searchsorted(binps.freq, bc) - 1 bpow = binpowers[bind] brms", "optpars['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x for x in sim_ksp if x >", "probs.keys(): print(\"Highest [5 bin smoothed] data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"s5maxfreq\"])", "create fake periodograms. Fit each fake periodogram with the same models as the", "background level, and this parameter should be last! 
par2 : {list, array-like} Input", "### Step 4: Fit fake periodograms and read out parameters of interest from", "import utils from src.SpectralAnalysis import powerspectrum from src.SpectralAnalysis import mcmc from src.SpectralAnalysis import", "distribution of parameters for func using MCMC, and create fake periodograms from samples", "### Step 4: Fit fake periodograms: for i, x in enumerate(fakeper): try: #", "periodicities in observed data and compute significance via MCMCs. First, fit the periodogram", "{list, array-like} Input guesses for the parameters taken by func. The number of", "95% \\n\") file.write(\"---------------------------------------------\\n\") for i in range(len(postpars['postmean'])): file.write(\"theta[\" + str(i) + \"] \\t", "outlier at frequency F=\" + str( probs[\"fitpars\"][\"maxfreq\"]) + \"Hz with power P=\" +", "do I need to use? if self.m == 1: lpost = posterior.PerPosterior(self.ps, func)", "else: continue ### Step 5: Compute Bayesian posterior probabilities of individual quantities p_maxpow", "= getattr(fitfake, func1name + 'fit') sim_pars2 = getattr(fitfake, func2name + 'fit') # if", "True If True (STRONGLY RECOMMENDED), use the emcee package for running MCMC. If", "\" + str(plrt_err)) if self.plot: n, bins, patches = plt.hist(sim_lrt, bins=100, normed=True, histtype='stepfilled')", "str(postpars[\"acor\"]) + \"\\n\") except KeyError: file.write(\"Module Acor not found. 
Cannot compute autocorrelation times", "= namestr self.plot = plot self.m = m def choose_noise_model(self, func1, par1, func2,", "for x in sim_s3max if x > fitpars['s3max']])) / float(len(sim_s3max)) p_s5max = float(len([x", "on screen and make funky plots print(\"Bayesian p-value for deviance D = \"", "tests - find periodicities by picking out the largest power in an observation/set", "plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s11max']) / 1.2, max(25,", "parname=None, noise1=-1, noise2=-1, writefile=True): \"\"\" Fit two models func1 and func2, compute the", "plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4, color='m') plt.savefig(self.namestr + '_qpolrt.png', format='png') plt.close() summary", "pmaxpow_err], 'maxpow_ul': maxpow_ul, 'p_s3max': [sim_s3max, p_s3max, ps3max_err], 'p_s5max': [sim_s5max, p_s5max, ps5max_err], 'p_s11max': [sim_s11max,", "{} binprob = {} for b in bins[:nbins]: binps = fitpars['bindict']['bin' + str(b)]", "i, x in enumerate(postpars[\"rhat\"]): print(\"The $R_hat$ value for Parameter \" + str(i) +", "\" +/- \" + str( probs[x][1]) + \"\\n\") elif x == 'p_maxpow': file.write(\"Bayesian", "if x > fitpars1['maxpow']])) / float(len(sim_maxpow)) p_deviance = float(len([x for x in sim_deviance", "sim_srat = [], [], [], [], [], [], [], [] ### Step 4:", "A periodogram object that is to be searched for QPOs namestr: string, optional,", "explained sufficiently with the simpler model. Parameters ---------- func1 : function Parametric model", "obs=True) fitpars = psfit.mlest(func, par, obs=True, noise=noise, m=self.m) bindict = fitpars['bindict'] # print('popt:", "str( probs[\"fitpars\"][\"s11maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"s11max\"])) print(\"Bayesian p-value for the", "the MAP fit using func1. 
The number of elements *must* equal the number", "data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"s3maxfreq\"]) + \"Hz with power P=\"", "power P_max = \" + str(p_maxpow) + \" +/- \" + str(pmaxpow_err)) resfile(\"Bayesian", "+ str(p_s5max) + \" +/- \" + str(ps5max_err)) # resfile('Upper limit on maximum", "* (1.0 - p_merit) / float(len(sim_ksp))) plrt_err = np.sqrt(p_lrt * (1.0 - p_lrt)", "this parameter should be last! par2 : {list, array-like} Input guesses for the", "frequencies and n parameters, and returns an array of model powers The function", "/ float(len(sim_merit)) p_srat = float(len([x for x in sim_srat if x > fitpars['sobs']]))", "print(\"p(LRT) = \" + str(p_lrt)) # print(\"LRT(obs) = \" + str(obslrt)) # print(\"mean(sim_lrt)", "psfit.find_qpo(func, ain, plot=True, obs=True, plotname=self.namestr + '_loglikes') ### simulate lots of realizations of", "for running MCMC. If False, use Metropolis-Hastings. \"\"\" ## the file name where", "str(pmaxpow_err)) # resfile('Upper limit on maximum signal power P_max_ul = ' + str(maxpow_ul))", "4 sain = copy.copy(fitpars['popt']) # print('popt2: ' + str(fitpars['popt'])) ### Step 4: Fit", "posterior probabilities of individual quantities p_deviance = float(len([x for x in sim_deviance if", "+ str(p_ksp) + \" +/- \" + str(pksp_err)) resfile(\"Bayesian p-value for Merit function:", "from src.SpectralAnalysis import powerspectrum from src.SpectralAnalysis import mcmc from src.SpectralAnalysis import mle from", "the likelihood ratio. 
Note that this also sets the maximum precision of the", "of MCMCs fakeper = mcobs.simulate_periodogram(nsim=nsim) ### empty lists for simulated quantities of interest:", "p_srat) / float(len(sim_ksp))) ### Display results on screen and make funky plots resfile(\"Bayesian", "= ' + str(bpow * (self.ps.df * b * self.ps.nphots))) resfile('The upper limit", "int, optional, default 1000 The number of simulations to use when computing the", "for the highest [5 bin smoothed] data/model outlier: \" + str( probs[x][0]) +", "depending on frequency binpowers = bmaxpow_ul * bintemplate / 2.0 - bintemplate ##", "1000] binlist = [r for r in fitpars[\"bindict\"].keys()] nbins = len(binlist) / 4", "setting a constant background level, and this parameter should be last! par1 :", "sim_fpeak, sim_srat, \\ sim_maxpow, sim_merit, sim_y0, sim_s3max, sim_s5max, sim_s11max = [], [], [],", "np.sqrt(p_s5max * (1.0 - p_s5max) / float(len(sim_ksp))) ps11max_err = np.sqrt(p_s11max * (1.0 -", "float(len(sim_ksp))) psrat_err = np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp))) ps3max_err = np.sqrt(p_s3max", "+ 'Hz is p = ' + str(p_bmaxpow) + ' +/- ' +", "int, optional, default -1 The index for the noise parameter in func1 and", "this list or array must match the number of parameters k taken by", "constant background level, and this parameter should be last! 
par : {list, array-like}", "Bayesian posterior probabilities pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance) / float(len(sim_ksp))) pksp_err", "+ str(b)] bmaxpow = np.array([x[\"bmax\" + str(b)] for x in bindicts]) maxpows_all[\"bin\" +", "print(\"-- Posterior Summary of Parameters: \\n\") print(\"parameter \\t mean \\t\\t sd \\t\\t 5%", "+ str(i) + \"] \\t \" + str(postpars['postmean'][i]) + \"\\t\" + str( postpars['posterr'][i])", "power P_max = \" + str(p_s11max) + \" +/- \" + str(ps11max_err)) #", "sim_maxpow.append(sim_pars['maxpow']) sim_merit.append(sim_pars['merit']) sim_fpeak.append(sim_pars['maxfreq']) sim_y0.append(sim_pars['mfit'][sim_pars['maxind']]) sim_srat.append(sim_pars['sobs']) sim_s3max.append(sim_pars['s3max']) sim_s5max.append(sim_pars['s5max']) sim_s11max.append(sim_pars['s11max']) except KeyboardInterrupt: break # except:", "p-value for maximum power P_max = \" + str(p_s5max) + \" +/- \"", "highest [unsmoothed] data/model outlier: \" + str(summary['s11max_ul']) + \"\\n\") return def plot_posteriors(namestr='test', **pars):", "posterior p-value for the maximum residual power for a binning of ' +", "for highest [unsmoothed] data/model outlier: \" + str(summary['s3max_ul']) + \"\\n\") elif x ==", "plt.title('smoothed (5) data/model outlier', fontsize=12) plt.subplot(2, 2, 4) n, bins, patches = plt.hist(sim_s3max,", "str(len(self.ps.ps))) if self.m == 1: lpost = posterior.PerPosterior(self.ps, func) else: lpost = posterior.StackPerPosterior(self.ps,", "bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s3max'])", "file.write( \"Bayesian p-value for Merit function: \" + str(probs[x][0]) + \" +/- \"", "'p_maxpow': [sim_maxpow, p_maxpow, pmaxpow_err], 'maxpow_ul': maxpow_ul, 'p_s3max': [sim_s3max, p_s3max, ps3max_err], 'p_s5max': [sim_s5max, p_s5max,", "sim_psfit.find_qpo(func, ain, obs=False, plot=True, plotname=plotstr + '_sim' + str(simno) + '_qposearch') 
sim_lrt.append(slrt) sim_optpars.append(soptpars)", "= ' + str(brms)) bindict['bin' + str(b) + '_ul_%.4fHz' % bc] = brms", "include a parameter setting a constant background level, and this parameter should be", "pars[plotkeys[i]][0], 0.8 * n, \"p = \" + str(pars[plotkeys[i]][1])) ax.title(\"Posterior for \" +", "+ \" +/- \" + str(pmaxpow_err)) resfile(\"Bayesian p-value for deviance D = \"", "+ \"\\n\") for x in probs.keys(): if x == 'p_lrt': print(\"Bayesian p-value for", "Too few simulations to compute five percent limit reliably!') fiveperlim = 1 ninetyfiveperlim", "str(optpars['sobs'])) print(\"mean(sim_srat) = \" + str(np.mean(sim_srat))) ### Step 6: Compute errors of Bayesian", "n, bins, patches = plt.hist(sim_lrt, bins=100, normed=True, histtype='stepfilled') plt.vlines(obslrt, 0.0, 0.8 * max(n),", "str(obslrt)) # print(\"mean(sim_lrt) = \" + str(np.mean(sim_lrt))) # print(\"Deviance(obs) = \" + str(fitpars1['deviance']))", "sim_pars1 = getattr(fitfake, func1name + 'fit') sim_pars2 = getattr(fitfake, func2name + 'fit') #", "'c : ' + str(fitpars['popt'])) sim_pars_all.append(sim_pars) sim_deviance.append(sim_pars['deviance']) sim_ksp.append(sim_pars['ksp']) sim_maxpow.append(sim_pars['maxpow']) sim_merit.append(sim_pars['merit']) sim_fpeak.append(sim_pars['maxfreq']) sim_y0.append(sim_pars['mfit'][sim_pars['maxind']]) sim_srat.append(sim_pars['sobs'])", "### print posterior summary of parameters: print(\"-- Posterior Summary of Parameters: \\n\") print(\"parameter", "in probs.keys(): print(\"Highest [3 bin smoothed] data/model outlier at frequency F=\" + str(", "\" +/- \" + str(probs[x][1])) elif x == 'p_merit': print(\"Bayesian p-value for Merit", "resfile(\"mean(sim_srat) = \" + str(np.mean(sim_srat))) ### Step 6: Compute errors of Bayesian posterior", "2.0 / (self.ps.df * b * self.ps.nphots ** 2.0) # print('len(binps.freq): ' +", "import cPickle as pickle except ImportError: import pickle import copy import numpy as", "0 ### run QPO search on each and 
return likelihood ratios parameters for", "For emcee, this can be smaller, but it's a good idea to verify", "x > fitpars['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x for x in sim_ksp if", "file.write(\"Bayesian p-value for deviance D = \" + str(probs[x][0]) + \" +/- \"", "20: # fitfake.plotfits(sim_pars1, sim_pars2, namestr=self.namestr+'_'+str(i)) sim_lrt.append(lrt) sim_deviance.append(sim_pars1['deviance']) sim_ksp.append(sim_pars1['ksp']) sim_maxpow.append(sim_pars1['maxpow']) sim_merit.append(sim_pars1['merit']) sim_fpeak.append(sim_pars1['maxfreq']) sim_y0.append(sim_pars1['mfit'][sim_pars1['maxind']]) sim_srat.append(sim_pars1['sobs'])", "> fitpars['merit']])) / float(len(sim_merit)) p_srat = float(len([x for x in sim_srat if x", "are higher than this ### note: sometimes simulations fail, therefore the 5% limit", "/ float(len(sim_deviance)) p_ksp = float(len([x for x in sim_ksp if x > fitpars['ksp']]))", "approach using LRTs Parameters ---------- ps : powerspectrum.Powerspectrum A periodogram object that is", "of fake periodograms - search for QPOs via a model selection approach using", "Choose the optimization algorithm used when minimizing the -log-likelihood. Choices are listed in", "+ str(p_lrt) + \" +/- \" + str(plrt_err)) if self.plot: n, bins, patches", "func1 and func2, compute the likelihood ratio at the maximum-a-posteriori paramters. If func1", "+ ' +/- ' + str(bmaxpow_err)) resfile('The corresponding value of the T_R statistic", "## then compute upper limits for powers I_j depending on frequency binpowers =", "an rms amplitude: ## first compute broadband noise model for binned frequencies bintemplate", "return def write_summary(self, summary, namestr=None): \"\"\" Write a summary of the analysis to", "to create fake periodograms. 
Fit each fake periodogram with the same models as", "__future__ import print_function import matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator try: import", "---------- Examples -------- \"\"\" def __init__(self, ps, namestr='test', plot=True, m=1): assert isinstance(ps, powerspectrum.PowerSpectrum),", "the output will be stored resfilename = self.namestr + \"_findperiodicity_results.dat\" ## open the", "\"fitpars\" in probs.keys(): print(\"Highest [5 bin smoothed] data/model outlier at frequency F=\" +", "probs.keys(): if x == 'p_lrt': print(\"Bayesian p-value for Likelihood Ratio: \" + str(probs[x][0])", "p_maxpow = float(len([x for x in sim_maxpow if x > fitpars1['maxpow']])) / float(len(sim_maxpow))", "of elements in this list or array must match the number of parameters", "for the real data obslrt, optpars, qpopars = psfit.find_qpo(func, ain, plot=True, obs=True, plotname=self.namestr", "sd \\t\\t 5% \\t\\t 95% \\n\") print(\"---------------------------------------------\\n\") for i in range(len(postpars['postmean'])): print(\"theta[\" +", "+ \"\\n\") elif x == 'p_srat': file.write(\"Bayesian p-value for the sum of residuals:", "par2, fitmethod='bfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, use_emcee=True, parname=None, noise1=-1, noise2=-1, writefile=True): \"\"\" Fit", "+ 'c : ' + str(fitpars['popt'])) sim_pars_all.append(sim_pars) sim_deviance.append(sim_pars['deviance']) sim_ksp.append(sim_pars['ksp']) sim_maxpow.append(sim_pars['maxpow']) sim_merit.append(sim_pars['merit']) sim_fpeak.append(sim_pars['maxfreq']) sim_y0.append(sim_pars['mfit'][sim_pars['maxind']])", "$R_hat$ value for Parameter \" + str(i) + \" is \" + str(x))", "Sample the posterior distribution of parameters for func using MCMC, and create fake", "float(len([x for x in sim_ksp if x > fitpars1['ksp']])) / float(len(sim_ksp)) p_merit =", "Continuing ...\") # continue # print('popt' + str(i) + 'd : ' +", "bins=100, normed=True, color=\"cyan\", 
histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s5max']) / 1.2, max(25, fitpars['s5max']", "elif x == 'p_s3max': file.write(\"Bayesian p-value for the highest [3 bin smoothed] data/model", "/ 1.2, max(25, fitpars['s3max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s11max'], 0.0, max(n),", "the maximum power in the data under the null hypothesis (no QPO). Parameters", "+ \" +/- \" + str(psrat_err)) if self.plot: plt.subplot(2, 2, 1) n, bins,", "+ str( fitpars[\"bindict\"][\"bmaxfreq\" + str(b)]) + ' is 2I/S = ' + str(fitpars['bindict'][\"bmax\"", "array where p_maxpow would be 0.05 ### i.e. when only 0.05*nsim simulations are", "= func(fitpars['bindict']['bin' + str(b)].freq, *fitpars['popt']) resfile(\"bintemplate[0]: \" + str(bintemplate[0])) ## then compute upper", "/ 1.2, max(25, fitpars['maxpow'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['maxpow'], 0.0, max(n),", "bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s11max'])", "\"acceptance\": mcobs.acceptance} return psfit, fakeper, summary def find_periodicity(self, func, par, fitmethod='bfgs', nchain=10, niter=5000,", "\"_summary_pickle.dat\", \"w\") pickle.dump(summary, picklefile) picklefile.close() file = open(namestr + \"_summary.dat\", \"w\") file.write(\"The ensemble", "+ str(pmerit_err)) print(\"Bayesian p-value for the np.sum of residuals: \" + str(p_srat) +", "+ str(p_ksp) + \" +/- \" + str(pksp_err)) print(\"Bayesian p-value for Merit function:", "10, 15, 20, 30, 50, 70, 100, 200, 300, 500, 700, 1000] binlist", "if self.ps.norm == 'leahy': binpowers = binpowers / (self.ps.df * b * self.ps.nphots)", "= binpowers * self.ps.n ** 2.0 / (self.ps.df * b * self.ps.nphots **", "an observation/set of fake periodograms - search for QPOs via a model selection", "p-value for the np.sum of residuals: \" + str(p_srat) + \" +/- \"", "p_maxpow = float(len([x for x in sim_maxpow if 
x > fitpars['maxpow']])) / float(len(sim_maxpow))", "string, optional, default bfgs Allows the choice of different minimization algorithms. Default uses", "Then sample the posterior distribution for the the simpler model (func1), pick parameter", "*always* -1. use_emcee : boolean, optional, default True If True (STRONGLY RECOMMENDED), use", "'maxpows_all': maxpows_all, 'mcobs': mcobs, 'p_maxpow': [sim_maxpow, p_maxpow, pmaxpow_err], 'maxpow_ul': maxpow_ul, 'p_s3max': [sim_s3max, p_s3max,", "\" + str(probs[x][1]) + \"\\n\") file.write( \"Upper limit for highest [unsmoothed] data/model outlier:", "(1.0 - p_merit) / float(len(sim_ksp))) plrt_err = np.sqrt(p_lrt * (1.0 - p_lrt) /", "Input guesses for the MAP fit using func1. The number of elements *must*", "check_conv=True, namestr=self.namestr, use_emcee=True, plot=self.plot, printobj=resfile, m=self.m) ### Step 3: create fake periodograms out", "func2 differ in complexity, the less complex should be func1. Then sample the", "plot self.m = m def choose_noise_model(self, func1, par1, func2, par2, fitmethod='bfgs', nchain=10, niter=5000,", "\" + str(x)) ### print posterior summary of parameters: print(\"-- Posterior Summary of", "par2, noise1=noise1, noise2=noise2, m=self.m) ### get out best fit parameters and associated quantities", "*must* equal the number of parameters k taken by func1. 
func2 : function", "return psfit, fakeper, mcobs sim_pars1 = getattr(fitfake, func1name + 'fit') sim_pars2 = getattr(fitfake,", "object that is to be searched for QPOs namestr: string, optional, default \"test\"", "'_ul_%.4fHz' % bc] = brms else: continue ### Step 5: Compute Bayesian posterior", "None: plotstr = self.namestr funcname = str(func).split()[1] # print(\"<< --- len(self.ps beginning): \"", "sim_y0.append(sim_pars1['mfit'][sim_pars1['maxind']]) sim_srat.append(sim_pars1['sobs']) except KeyboardInterrupt: break if len(sim_maxpow) == 0: resfile(\"Analysis of Burst failed!", "Markov Chain Monte Carlo Simulations ### of model 1: mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps,", "is 2I/S = ' + str(bmaxpow_ul)) ### now turn upper limit into an", "fontsize=12) plt.subplot(2, 2, 2) n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled')", "parameters for func using MCMC, and create fake periodograms from samples of the", "outlier: \" + str( probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x", "= summary[x] print(\"The ensemble acceptance rate is \" + str(postpars[\"acceptance\"]) + \" .\")", "2I/S = ' + str(bmaxpow_ul)) ### now turn upper limit into an rms", "strings here to set parameter names for plotting noise: int, optional, default -1", "histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s11max']) / 1.2, max(25, fitpars['s3max'] * 1.2) plt.axis([xmin,", "> fitpars1['sobs']])) / float(len(sim_srat)) resfile('simulated srat: ' + str(sim_srat)) resfile('observed srat: ' +", "of the posterior predictive p-value (for 1000 simulations, the p-value can be constrained", "In the pre-defined models, this index is *always* -1. 
\"\"\" resfilename = self.namestr", "find optimum QPO values for the real data obslrt, optpars, qpopars = psfit.find_qpo(func,", "-------- \"\"\" def __init__(self, ps, namestr='test', plot=True, m=1): assert isinstance(ps, powerspectrum.PowerSpectrum), \"ps must", "rms amplitude at 40, 70, 100 and 300 Hz ## first, convert powers", "using likelihood ratio tests - find periodicities by picking out the largest power", "+ str(p_merit) + \" +/- \" + str(pmerit_err)) resfile(\"Bayesian p-value for the np.sum", "p-value for Likelihood Ratio: \" + str(p_lrt) + \" +/- \" + str(plrt_err))", "\\t\\t 95% \\n\") print(\"---------------------------------------------\\n\") for i in range(len(postpars['postmean'])): print(\"theta[\" + str(i) + \"]", "maximum precision of the posterior predictive p-value (for 1000 simulations, the p-value can", "print('len(binps.freq): ' + str(len(binps.freq))) # print('len(binpowers): ' + str(len(binpowers))) if searchfreq is None:", "'p_deviance': file.write(\"Bayesian p-value for deviance D = \" + str(probs[x][0]) + \" +/-", "+ str(np.mean(sim_merit))) print(\"Srat(obs) = \" + str(optpars['sobs'])) print(\"mean(sim_srat) = \" + str(np.mean(sim_srat))) ###", "else: lpost = posterior.StackPerPosterior(self.ps, func1, self.m) ### Step 2: Set up Markov Chain", "psrat_err = np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp))) ### Display results on", "lw=4, color='m') plt.savefig(self.namestr + '_qpolrt.png', format='png') plt.close() summary = {\"p_lrt\": [p_lrt, plrt_err], \"p_deviance\":", "parameters taken by func. The number of elements in this list or array", "src.SpectralAnalysis import mle from src.SpectralAnalysis import posterior ########################################## # # class Bayes: Bayesian", "default 1.0 A tuning parameter for the MCMC step. 
Used only in Metropolis-Hastings.", "boolean, optional, default True If True (STRONGLY RECOMMENDED), use the emcee package for", "if x > fitpars['bindict'][\"bmax\" + str(b)]])) / float( len(bmaxpow)) bindict[\"p_maxpow\" + str(b)] =", "posterior. For each fake periodogram, find the MAP estimate, divide out the MAP", "note: this is the limit for 2*I/S --> multiply by S to get", "*fitpars['popt']) resfile(\"bintemplate[0]: \" + str(bintemplate[0])) ## then compute upper limits for powers I_j", "the rms amplitude at ' + str(bc) + 'Hz for a binning of", "fitpars['merit']])) / float(len(sim_merit)) p_srat = float(len([x for x in sim_srat if x >", "[] simno = 0 ### run QPO search on each and return likelihood", "+ \" +/- \" + str(psrat_err)) print(\"Bayesian p-value for Likelihood Ratio: \" +", "\" +/- \" + str(probs[x][1])) elif x == 'p_s11max': if \"fitpars\" in probs.keys():", "taken by func1. func2 : function Parametric model for the periodogram. Needs to", "nsim=1000, covfactor=1.0, use_emcee=True, parname=None, noise1=-1, noise2=-1, writefile=True): \"\"\" Fit two models func1 and", "the largest power in # an observation/set of fake periodograms # - search", "def __init__(self, ps, namestr='test', plot=True, m=1): assert isinstance(ps, powerspectrum.PowerSpectrum), \"ps must be of", "\"Bayesian p-value for Likelihood Ratio: \" + str(probs[x][0]) + \" +/- \" +", "str(i) + \"] \\t \" + str(postpars['postmean'][i]) + \"\\t\" + str( postpars['posterr'][i]) +", "\" .\\n\") try: file.write(\"The autocorrelation times are: \" + str(postpars[\"acor\"]) + \"\\n\") except", "for x in sim_s11max if x > fitpars['s11max']])) / float(len(sim_s11max)) ### sort maximum", "> fitpars['bindict'][\"bmax\" + str(b)]])) / float( len(bmaxpow)) bindict[\"p_maxpow\" + str(b)] = p_bmaxpow bmaxpow_err", "and returns an array of model powers The function should include a parameter", "'_maxpow.png', format='png') plt.close() results = {\"fitpars\": fitpars, 'bindict': bindict, 'maxpows_all': 
maxpows_all, 'mcobs': mcobs,", "+ str(postpars['postmean'][i]) + \"\\t\" + str( postpars['posterr'][i]) + \"\\t\" + str(postpars['postquantiles'][i][0]) + \"\\t\"", "1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s3max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (3) data',", "p-value for KS test: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])", "for Likelihood Ratio: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif", "the posterior. For each fake periodogram, find the MAP estimate, divide out the", "power P_max_ul = ' + str(maxpow_ul)) resfile(\"Bayesian p-value for maximum power P_max =", "+/- \" + str(pdeviance_err)) print(\"Bayesian p-value for KS test: \" + str(p_ksp) +", "analysis to file. NOT USED! :param summary: :param namestr: :return: \"\"\" if not", "For Metropolis-Hastings, this needs to be large (>10000) For emcee, this can be", "= {\"p_lrt\": [p_lrt, plrt_err], \"p_deviance\": [p_deviance, pdeviance_err], \"p_ksp\": [p_ksp, pksp_err], \"p_merit\": [p_merit, pmerit_err],", "bintemplate / 2.0 - bintemplate ## now compute rms amplitude at 40, 70,", "\" + str(postpars[\"acceptance\"]) + \" .\") try: print(\"The autocorrelation times are: \" +", "+ str(plrt_err)) if self.plot: n, bins, patches = plt.hist(sim_lrt, bins=100, normed=True, color=\"cyan\", histtype='stepfilled')", "' + str(bmaxpow_err)) resfile('The corresponding value of the T_R statistic at frequency f", "' + str(i) + ' failed! 
Returning ...') # return psfit, fakeper, mcobs", "= mcobs.simulate_periodogram(nsim=nsim) ### empty lists to store simulated LRTS and parameters in sim_lrt,", "Bayes object that can: # - pick between two models using likelihood ratio", "bindict['bmax' + str(b) + '_ul'] = bmaxpow_ul resfile('The posterior p-value for the maximum", "+ str(probs[x][1]) + \"\\n\") elif x == 'p_merit': file.write( \"Bayesian p-value for Merit", "> fitpars1['merit']])) / float(len(sim_merit)) p_lrt = float(len([x for x in sim_lrt if x", "periodogram when saving output (text files and plots) plot: boolean, optional, default True", "the highest power in the residuals and its frequency. Sample the posterior distribution", "Note that this also sets the maximum precision of the posterior predictive p-value", "simpler model (func1), pick parameter sets from the posterior to create fake periodograms.", "> optpars['sobs']])) / float(len(sim_srat)) print(\"p(LRT) = \" + str(p_lrt)) # print(\"LRT(obs) = \"", ": string, optional, default bfgs Allows the choice of different minimization algorithms. Default", "+ str(b) + 'err'] = bmaxpow_err sim_bmaxpow_sort = np.msort(bmaxpow) ### note: this is", "file.write( \"Upper limit for highest [unsmoothed] data/model outlier: \" + str(summary['maxpow_ul']) + \"\\n\")", "Posterior Summary of Parameters: \\n\") print(\"parameter \\t mean \\t\\t sd \\t\\t 5% \\t\\t", "Step 4: Fit fake periodograms and read out parameters of interest from each", "of the broadband noise model from MCMCs funcfake = mcobs.simulate_periodogram(nsim=nsim) ### empty lists", "noise2 : int, optional, default -1 The index for the noise parameter in", "residuals: \" + str(probs[x][0]) + \" +/- \" + str( probs[x][1]) + \"\\n\")", "for 40 Hz: print(searchfreq) for bc in searchfreq: if bc > (binps.freq[1] -", "Metropolis-Hastings. 
parname : list, optional, default None Include a list of strings here", "max(n), lw=4, color='m') plt.savefig(self.namestr + '_qpolrt.png', format='png') plt.close() summary = {\"p_lrt\": [p_lrt, plrt_err],", "taken by func. The number of elements in this list or array must", "= plt.hist(sim_maxpow, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['maxpow']) / 1.2,", "return likelihood ratios parameters for each for x in funcfake: try: simno =", "limit for highest [unsmoothed] data/model outlier: \" + str(summary['s5max_ul']) + \"\\n\") elif x", "mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return psfit, fakeper, summary def find_periodicity(self,", "+ 'fit') # if lrt > 20: # fitfake.plotfits(sim_pars1, sim_pars2, namestr=self.namestr+'_'+str(i)) sim_lrt.append(lrt) sim_deviance.append(sim_pars1['deviance'])", "for the periodogram. Needs to be a function that takes an array of", "picklefile = open(namestr + \"_summary_pickle.dat\", \"w\") pickle.dump(summary, picklefile) picklefile.close() file = open(namestr +", "+ str(i) + 'a : ' + str(fitpars['popt'])) fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False)", "str(probs[x][1]) + \"\\n\") elif x == 'p_ksp': file.write(\"Bayesian p-value for KS test: \"", "a list of strings here to set parameter names for plotting noise: int,", "array of frequencies and k parameters, and returns an array of model powers.", "float(len(sim_ksp))) ### Display results on screen and make funky plots print(\"Bayesian p-value for", "strings here to set parameter names for plotting noise1, noise2 : int, optional,", "use the right distribution Attributes ---------- Examples -------- \"\"\" def __init__(self, ps, namestr='test',", "model 1: mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost, topt=fitpars1['popt'], tcov=fitpars1['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname,", "> 20: # 
fitfake.plotfits(sim_pars1, sim_pars2, namestr=self.namestr+'_'+str(i)) sim_lrt.append(lrt) sim_deviance.append(sim_pars1['deviance']) sim_ksp.append(sim_pars1['ksp']) sim_maxpow.append(sim_pars1['maxpow']) sim_merit.append(sim_pars1['merit']) sim_fpeak.append(sim_pars1['maxfreq']) sim_y0.append(sim_pars1['mfit'][sim_pars1['maxind']])", "this is the limit for 2*I/S --> multiply by S to get powers", "lw=2, color='navy') plt.title('smoothed (5) data/model outlier', fontsize=12) plt.subplot(2, 2, 4) n, bins, patches", "str(probs[x][1]) + \"\\n\") elif x == 'p_deviance': file.write(\"Bayesian p-value for deviance D =", "it's really computationally expensive. Parameters ---------- func : function Parametric model for the", "= ' + str( fitpars[\"bindict\"][\"bmaxfreq\" + str(b)]) + ' is 2I/S = '", "\" + str(optpars['merit'])) print(\"mean(sim_merit) = \" + str(np.mean(sim_merit))) print(\"Srat(obs) = \" + str(optpars['sobs']))", "x == 'p_merit': print(\"Bayesian p-value for Merit function: \" + str(probs[x][0]) + \"", "the simpler model. 
Parameters ---------- func1 : function Parametric model for the periodogram.", "'p_ksp': print(\"Bayesian p-value for KS test: \" + str(probs[x][0]) + \" +/- \"", "rate is \" + str(postpars[\"acceptance\"]) + \" .\\n\") try: file.write(\"The autocorrelation times are:", "fail, therefore the 5% limit should be 0.05*len(sims) fiveperlim = int(0.05 * len(sim_maxpow))", "ps : powerspectrum.Powerspectrum A periodogram object that is to be searched for QPOs", "probabilities pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance) / float(len(sim_ksp))) pksp_err = np.sqrt(p_ksp", "\\t\\t 5% \\t\\t 95% \\n\") print(\"---------------------------------------------\\n\") for i in range(len(postpars['postmean'])): print(\"theta[\" + str(i)", "300 Hz ## first, convert powers into rms normalization, if they're not already", "choose_noise_model(self, func1, par1, func2, par2, fitmethod='bfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, use_emcee=True, parname=None, noise1=-1,", "should be func1. Then sample the posterior distribution for the the simpler model", "p_srat) / float(len(sim_ksp))) ### Display results on screen and make funky plots print(\"Bayesian", "...\") return False, False, False else: ### Step 5: Compute Bayesian posterior probabilities", "periodicities by picking out the largest power in an observation/set of fake periodograms", "* 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['maxpow'], 0.0, max(n), lw=2, color='navy') plt.title('unsmoothed data',", "+ str(pksp_err)) print(\"Bayesian p-value for Merit function: \" + str(p_merit) + \" +/-", "index is *always* -1. 
\"\"\" resfilename = self.namestr + \"_choosenoisemodel.dat\" resfile = utils.TwoPrint(resfilename)", "\"\\n\") for x in probs.keys(): if x == 'p_lrt': file.write( \"Bayesian p-value for", "1: mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost, topt=fitpars1['popt'], tcov=fitpars1['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True,", "for x in probs.keys(): if x == 'p_lrt': print(\"Bayesian p-value for Likelihood Ratio:", "== 'p_lrt': file.write( \"Bayesian p-value for Likelihood Ratio: \" + str(probs[x][0]) + \"", ".\\n\") try: file.write(\"The autocorrelation times are: \" + str(postpars[\"acor\"]) + \"\\n\") except KeyError:", "+ \"\\n\") elif x == 'p_s3max': file.write(\"Bayesian p-value for the highest [3 bin", "- binps.freq[0]): bind = np.searchsorted(binps.freq, bc) - 1 bpow = binpowers[bind] brms =", "### i.e. when only 0.05*nsim simulations are higher than this ### note: sometimes", "x == 'p_s3max': file.write(\"Bayesian p-value for the highest [3 bin smoothed] data/model outlier:", "+ \" +/- \" + str(probs[x][1]) + \"\\n\") elif x == 'p_merit': file.write(", "bpow = binpowers[bind] brms = np.sqrt(bpow * b * self.ps.df) resfile('The upper limit", "for the MAP fit using func2. The number of elements *must* equal the", "10 The number of chains or walkers to use in MCMC. For Metropolis-Hastings,", "+ str(maxpow_ul)) resfile(\"Bayesian p-value for maximum power P_max = \" + str(p_s3max) +", "of model powers. 
The function should include a parameter setting a constant background", "QPO search on each and return likelihood ratios parameters for each for x", "str(probs[x][0]) + \" +/- \" + str(probs[x][1]) + \"\\n\") elif x == 'p_ksp':", "sum of residuals: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif", "utils from src.SpectralAnalysis import powerspectrum from src.SpectralAnalysis import mcmc from src.SpectralAnalysis import mle", "limit on maximum signal power P_max_ul = ' + str(maxpow_ul)) resfile(\"Bayesian p-value for", "using LRTs # # # TO DO: Need to add smoothing for picking", "use_emcee=use_emcee, plot=self.plot, printobj=resfile, m=self.m) ### Step 3: create fake periodograms out of MCMCs", "Metropolis-Hastings, this needs to be large (>10000) For emcee, this can be smaller,", "summary def find_periodicity(self, func, par, fitmethod='bfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, parname=None, noise=-1, use_emcee=True,", "= float(len([x for x in sim_ksp if x > fitpars['ksp']])) / float(len(sim_ksp)) p_merit", "= \" + str(p_s5max) + \" +/- \" + str(ps5max_err)) # resfile('Upper limit", "+ str(p_srat) + \" +/- \" + str(psrat_err)) if self.plot: plt.subplot(2, 2, 1)", "mle.PerMaxLike(x, fitmethod='constbfgs', obs=False) slrt, soptpars, sqpopars = sim_psfit.find_qpo(func, ain, obs=False, plot=True, plotname=plotstr +", "outlier at frequency F=\" + str( probs[\"fitpars\"][\"s3maxfreq\"]) + \"Hz with power P=\" +", "in sim_maxpow if x > fitpars1['maxpow']])) / float(len(sim_maxpow)) p_deviance = float(len([x for x", "file.write(\"Bayesian p-value for the highest [3 bin smoothed] data/model outlier: \" + str(", "- p_s5max) / float(len(sim_ksp))) ps11max_err = np.sqrt(p_s11max * (1.0 - p_s11max) / float(len(sim_ksp)))", "D = \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x", "\" is \" + str(x)) ### print posterior summary of parameters: print(\"-- Posterior", "of the results. NOT USED! 
\"\"\" try: keys = summary.keys() except AttributeError: raise", "psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) obslrt = psfit.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2,", "the data, and compute the likelihood ratios such that it is possible to", "\" + str(probs[x][1])) elif x == 'p_deviance': print(\"Bayesian p-value for deviance D =", "power P_max_ul = ' + str(s11max_ul)) resfile(\"Bayesian p-value for deviance D = \"", "\" + str(summary['s11max_ul']) + \"\\n\") return def plot_posteriors(namestr='test', **pars): plotkeys = pars.keys() N", "### print posterior summary of parameters: file.write(\"-- Posterior Summary of Parameters: \\n\") file.write(\"parameter", ": ' + str(fitpars['popt'])) sim_pars = fitfake.mlest(func, sain, obs=False, noise=noise, m=self.m) # print('popt'", "0: resfile('Warning! Too few simulations to compute five percent limit reliably!') fiveperlim =", "MCMC, and create fake periodograms from samples of the posterior. For each fake", "float(len(sim_srat)) p_s3max = float(len([x for x in sim_s3max if x > fitpars['s3max']])) /", "' + str(s3max_ul)) resfile(\"Bayesian p-value for maximum power P_max = \" + str(p_s5max)", "+ 1)) plt.subplots_adjust(top=0.95, bottom=0.05, left=0.05, right=0.95, wspace=0.2, hspace=0.2) for i in range(N): ax", "in func. In the pre-defined models, this index is *always* -1. 
use_emcee :", "number of parameters fig = plt.figure(figsize=(2, N / 2 + 1)) plt.subplots_adjust(top=0.95, bottom=0.05,", "sim_maxpow, sim_merit, sim_y0, sim_s3max, sim_s5max, sim_s11max = [], [], [], [], [], [],", "+ str(summary['maxpow_ul']) + \"\\n\") elif x == 'p_s3max': file.write(\"Bayesian p-value for the highest", "' + str(s5max_ul)) resfile(\"Bayesian p-value for maximum power P_max = \" + str(p_s11max)", "function: \" + str(p_merit) + \" +/- \" + str(pmerit_err)) print(\"Bayesian p-value for", "float(len(sim_ksp))) psrat_err = np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp))) ### Display results", "= psfit.mlest(func, ain, obs=True, noise=-1, m=self.m) # print(\"<< --- len(self.ps beginning): \" +", "Exception(\"Summary must be a dictionary!\") probs = dict() postpars = dict() ### sort", "+ \" +/- \" + str(probs[x][1])) elif x == 'p_srat': print(\"Bayesian p-value for", "* (1.0 - p_deviance) / float(len(sim_ksp))) pksp_err = np.sqrt(p_ksp * (1.0 - p_ksp)", "resfile(\"KSP(obs) = \" + str(fitpars1['ksp'])) resfile(\"mean(sim_ksp) = \" + str(np.mean(sim_ksp))) resfile(\"Merit(obs) = \"", "(self.ps.df * b * self.ps.nphots))) resfile('The upper limit on the rms amplitude at", "deviance D = \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1]) +", "mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return summary def print_summary(self, summary): \"\"\" Print a", "+ \"\\t\" + str(postpars['postquantiles'][i][0]) + \"\\t\" + str( postpars[\"postquantiles\"][i][1]) + \"\\n\") for x", "(1.0 - p_lrt) / float(len(sim_ksp))) psrat_err = np.sqrt(p_srat * (1.0 - p_srat) /", "= float(len([x for x in sim_deviance if x > optpars['deviance']])) / float(len(sim_deviance)) p_ksp", "= float(len([x for x in sim_srat if x > optpars['sobs']])) / float(len(sim_srat)) print(\"p(LRT)", "normalization, if they're not already if self.ps.norm == 'leahy': binpowers = binpowers /", "== 1: lpost = posterior.PerPosterior(self.ps, func1) else: lpost = 
posterior.StackPerPosterior(self.ps, func1, self.m) ###", "maximum-a-posteriori (MAP) estimate. Divide the data by the MAP model; for a perfect", "func. The number of elements in this list or array must match the", "self.ps.ps, lpost, topt=fitpars['popt'], tcov=fitpars['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=True, plot=self.plot, m=self.m)", "limit reliably!') fiveperlim = 1 ninetyfiveperlim = len(sim_maxpow) - fiveperlim # print('popt4: '", "plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s3max']) / 1.2, max(25,", "is \" + str(x)) ### print posterior summary of parameters: print(\"-- Posterior Summary", "x in sim_pars_all] ### get out binned powers: maxpows_all = {} binprob =", "computing the posterior distribution of the likelihood ratio. Note that this also sets", "file.write(\"Bayesian p-value for the highest [11 bin smoothed] data/model outlier: \" + str(", "'p_merit': file.write( \"Bayesian p-value for Merit function: \" + str(probs[x][0]) + \" +/-", "and returns an array of model powers. The function should include a parameter", "highest [unsmoothed] data/model outlier: \" + str(summary['maxpow_ul']) + \"\\n\") elif x == 'p_s3max':", "\"fitpars\" in probs.keys(): print(\"Highest [3 bin smoothed] data/model outlier at frequency F=\" +", "pick parameter sets from the posterior to create fake periodograms. 
Fit each fake", "parname=None, plotstr=None, use_emcee=True): \"\"\" Find QPOs by fitting a QPO + background model", "make funky plots print(\"Bayesian p-value for deviance D = \" + str(p_deviance) +", "plt.savefig(self.namestr + '_qpolrt.png', format='png') plt.close() summary = {\"p_lrt\": [p_lrt, plrt_err], \"p_deviance\": [p_deviance, pdeviance_err],", "is the limit for 2*I/S --> multiply by S to get powers for", "using likelihood ratio tests # - find periodicities by picking out the largest", "### run QPO search on each and return likelihood ratios parameters for each", "func1. Then sample the posterior distribution for the the simpler model (func1), pick", "False, False, False else: ### Step 5: Compute Bayesian posterior probabilities of individual", "float(len(sim_maxpow)) p_deviance = float(len([x for x in sim_deviance if x > fitpars['deviance']])) /", "float(len([x for x in sim_s5max if x > fitpars['s5max']])) / float(len(sim_s5max)) p_s11max =", "= {\"fitpars\": fitpars, 'bindict': bindict, 'maxpows_all': maxpows_all, 'mcobs': mcobs, 'p_maxpow': [sim_maxpow, p_maxpow, pmaxpow_err],", "str(np.mean(sim_merit))) print(\"Srat(obs) = \" + str(optpars['sobs'])) print(\"mean(sim_srat) = \" + str(np.mean(sim_srat))) ### Step", "+ str(postpars[\"acceptance\"]) + \" .\") try: print(\"The autocorrelation times are: \" + str(postpars[\"acor\"]))", "'p_s3max': if \"fitpars\" in probs.keys(): print(\"Highest [3 bin smoothed] data/model outlier at frequency", "'p_': probs[x] = summary[x] else: postpars[x] = summary[x] picklefile = open(namestr + \"_summary_pickle.dat\",", "will be saved to disk m: integer, optional, default 1 If the periodogram", "# print('popt2: ' + str(fitpars['popt'])) ### Step 4: Fit fake periodograms: for i,", "summary[x] else: postpars[x] = summary[x] print(\"The ensemble acceptance rate is \" + str(postpars[\"acceptance\"])", "pre-defined models, this index is *always* -1. 
\"\"\" resfilename = self.namestr + \"_choosenoisemodel.dat\"", "= float(len([x for x in sim_srat if x > fitpars1['sobs']])) / float(len(sim_srat)) resfile('simulated", "sim_srat = [], [], [], [], [], [], [] simno = 0 ###", "\" + str(pksp_err)) print(\"Bayesian p-value for Merit function: \" + str(p_merit) + \"", "np.msort(sim_maxpow) sim_s3max_sort = np.msort(sim_s3max) sim_s5max_sort = np.msort(sim_s5max) sim_s11max_sort = np.msort(sim_s11max) ### note: this", "# print('popt3: ' + str(fitpars['popt'])) ### upper limit is the power in the", "' + str(maxpow_ul)) resfile(\"Bayesian p-value for maximum power P_max = \" + str(p_s3max)", "For Metropolis-Hastings, use ~10-20 and many samples For emcee, use as many as", "data/model outlier: \" + str(summary['s5max_ul']) + \"\\n\") elif x == 'p_s11max': file.write(\"Bayesian p-value", "func1, self.m) ### Step 2: Set up Markov Chain Monte Carlo Simulations ###", "[], [], [] ### Step 4: Fit fake periodograms and read out parameters", "enumerate(postpars[\"rhat\"]): print(\"The $R_hat$ value for Parameter \" + str(i) + \" is \"", "resfile(\"p(LRT) = \" + str(p_lrt)) resfile(\"KSP(obs) = \" + str(fitpars1['ksp'])) resfile(\"mean(sim_ksp) = \"", "\" + str(probs[x][1]) + \"\\n\") elif x == 'p_deviance': file.write(\"Bayesian p-value for deviance", "as plt from matplotlib.ticker import MaxNLocator try: import cPickle as pickle except ImportError:", "= mle.PerMaxLike(x, fitmethod=fitmethod, obs=False) # print('popt' + str(i) + 'b : ' +", "probs[x][0]) + \" +/- \" + str(probs[x][1])) return def write_summary(self, summary, namestr=None): \"\"\"", "should be last! par : {list, array-like} Input guesses for the parameters taken", "= binpowers / (self.ps.df * b * self.ps.nphots) elif self.ps.norm == 'variance': binpowers", "if plotstr == None: plotstr = self.namestr funcname = str(func).split()[1] # print(\"<< ---", "p-value can be constrained only to 0.001). 
covfactor : float, optional, default 1.0", "file.write(\"Module Acor not found. Cannot compute autocorrelation times for the parameters \\n\") for", "in sim_lrt if x > obslrt])) / float(len(sim_lrt)) p_srat = float(len([x for x", "p_merit = float(len([x for x in sim_merit if x > fitpars['merit']])) / float(len(sim_merit))", "powers The function should include a parameter setting a constant background level, and", "str(probs[x][1])) elif x == 'p_s3max': if \"fitpars\" in probs.keys(): print(\"Highest [3 bin smoothed]", "of ' + str( self.ps.df * b) + 'Hz is p = '", "+ str( probs[x][0]) + \" +/- \" + str(probs[x][1]) + \"\\n\") file.write( \"Upper", "topt=fitpars['popt'], tcov=fitpars['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=True, plot=self.plot, printobj=resfile, m=self.m) ###", "on the power at ' + str(bc) + 'Hz for a binning of", "+ str(probs[x][0]) + \" +/- \" + str(probs[x][1]) + \"\\n\") elif x ==", "[x[\"bindict\"] for x in sim_pars_all] ### get out binned powers: maxpows_all = {}", "\" + str(probs[x][1]) + \"\\n\") elif x == 'p_ksp': file.write(\"Bayesian p-value for KS", "try: simno = simno + 1 sim_psfit = mle.PerMaxLike(x, fitmethod='constbfgs', obs=False) slrt, soptpars,", "print(\"Bayesian p-value for Merit function: \" + str(probs[x][0]) + \" +/- \" +", "str(i) + 'b : ' + str(fitpars['popt'])) sim_pars = fitfake.mlest(func, sain, obs=False, noise=noise,", "\" + str( probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x ==", "import copy import numpy as np from src.SpectralAnalysis import utils from src.SpectralAnalysis import", "\" +/- \" + str(probs[x][1])) elif x == 'p_s5max': if \"fitpars\" in probs.keys():", "self.plot: plt.subplot(2, 2, 1) n, bins, patches = plt.hist(sim_maxpow, bins=100, normed=True, color=\"cyan\", histtype='stepfilled')", "mcobs.mean, \"posterr\": mcobs.std, \"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": 
mcobs.acceptance} return results", "= float(len([x for x in sim_merit if x > optpars['merit']])) / float(len(sim_merit)) p_lrt", "postpars[\"postquantiles\"][i][1]) + \"\\n\") for x in probs.keys(): if x == 'p_lrt': print(\"Bayesian p-value", "pretty robust for most purposes. nchain : int, optional, default 10 The number", "for the np.sum of residuals: \" + str(p_srat) + \" +/- \" +", "at frequency F=\" + str( probs[\"fitpars\"][\"s11maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"s11max\"]))", "Bayesian posterior probabilities of individual quantities p_deviance = float(len([x for x in sim_deviance", "\"\\n\") elif x == 'p_ksp': file.write(\"Bayesian p-value for KS test: \" + str(probs[x][0])", "getattr(psfit, func2name + 'fit') if self.plot: ### plot the periodogram and best fit", "fitmethod='bfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, parname=None, noise=-1, use_emcee=True, searchfreq=None): \"\"\" Find periodicities in", "here to set parameter names for plotting noise1, noise2 : int, optional, default", "+ \" +/- \" + str(probs[x][1])) elif x == 'p_deviance': print(\"Bayesian p-value for", "step. Used only in Metropolis-Hastings. parname : list, optional, default None Include a", "resfile(\"Bayesian p-value for Merit function: \" + str(p_merit) + \" +/- \" +", "would be 0.05 ### i.e. 
when only 0.05*nsim simulations are higher than this", "0.0, max(n)]) plt.vlines(fitpars['maxpow'], 0.0, max(n), lw=2, color='navy') plt.title('unsmoothed data', fontsize=12) plt.subplot(2, 2, 2)", "ratios and compute a posterior predictive p-value that the data can be explained", "+ str(s3max_ul)) resfile(\"Bayesian p-value for maximum power P_max = \" + str(p_s5max) +", "diagnostic plots will be saved to disk m: integer, optional, default 1 If", "several diagnostic plots will be saved to disk m: integer, optional, default 1", "1: fit both models to observation and compute LRT psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod,", "resfile(\"Bayesian p-value for KS test: \" + str(p_ksp) + \" +/- \" +", "== 'p_deviance': print(\"Bayesian p-value for deviance D = \" + str(probs[x][0]) + \"", "sim_maxpow, sim_merit, sim_fpeak, sim_y0, sim_srat = [], [], [], [], [], [], [],", "times for the parameters \\n\") for i, x in enumerate(postpars[\"rhat\"]): file.write(\"The $R_hat$ value", "an observation/set of fake periodograms # - search for QPOs via a model", "the number of parameters k taken by func. fitmethod : string, optional, default", "summary of the analysis to file. NOT USED! :param summary: :param namestr: :return:", "models as the data, and compute the likelihood ratios such that it is", "par1 : {list, array-like} Input guesses for the MAP fit using func1. 
The", "print(\"mean(sim_srat) = \" + str(np.mean(sim_srat))) ### Step 6: Compute errors of Bayesian posterior", "str( probs[x][1]) + \"\\n\") elif x == 'p_maxpow': file.write(\"Bayesian p-value for the highest", "fitpars['maxpow']])) / float(len(sim_maxpow)) p_deviance = float(len([x for x in sim_deviance if x >", "np.sum of residuals: \" + str(p_srat) + \" +/- \" + str(psrat_err)) print(\"Bayesian", "step 1: fit both models to observation and compute LRT psfit = mle.PerMaxLike(self.ps,", "# # # class Bayes(object): \"\"\" Bayesian time series analysis This class defines", "Find the highest power in the residuals and its frequency. Sample the posterior", "str(p_srat) + \" +/- \" + str(psrat_err)) if self.plot: plt.subplot(2, 2, 1) n,", "number of parameters k taken by func1. func2 : function Parametric model for", "p-value for deviance D = \" + str(p_deviance) + \" +/- \" +", "in the residuals and its frequency. Sample the posterior distribution of parameters for", "'a : ' + str(fitpars['popt'])) fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False) # print('popt' +", "KS test: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x", "+ \"\\n\") elif x == 'p_deviance': file.write(\"Bayesian p-value for deviance D = \"", "with two degrees of freedom. Find the highest power in the residuals and", "really computationally expensive. Parameters ---------- func : function Parametric model for the periodogram.", "fitting a QPO + background model to *every* frequency. NOTE: I rarely ever", "patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s5max']) /", "np.sqrt(p_ksp * (1.0 - p_ksp) / float(len(sim_ksp))) pmerit_err = np.sqrt(p_merit * (1.0 -", "str(p_ksp) + \" +/- \" + str(pksp_err)) print(\"Bayesian p-value for Merit function: \"", "create fake periodograms from samples of the posterior. 
For each fake periodogram, find", "float(len(sim_merit)) p_lrt = float(len([x for x in sim_lrt if x > obslrt])) /", "= bmaxpow_ul * bintemplate / 2.0 - bintemplate ## now compute rms amplitude", "fitpars['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x for x in sim_ksp if x >", "= {} binprob = {} for b in bins[:nbins]: binps = fitpars['bindict']['bin' +", "the periodogram. Needs to be a function that takes an array of frequencies", "KeyError: file.write(\"Module Acor not found. Cannot compute autocorrelation times for the parameters \\n\")", "[], [] ### Step 4: Fit fake periodograms and read out parameters of", "str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_merit': print(\"Bayesian p-value", "only to 0.001). covfactor : float, optional, default 1.0 A tuning parameter for", "+ str(optpars['ksp'])) print(\"mean(sim_ksp) = \" + str(np.mean(sim_ksp))) print(\"Merit(obs) = \" + str(optpars['merit'])) print(\"mean(sim_merit)", "the length of the Markov chains. For Metropolis-Hastings, this needs to be large", "this needs to be large (>10000) For emcee, this can be smaller, but", "str(postpars['postquantiles'][i][0]) + \"\\t\" + str( postpars[\"postquantiles\"][i][1]) + \"\\n\") for x in probs.keys(): if", "self.ps.nphots))) resfile('The upper limit on the rms amplitude at ' + str(bc) +", "P_max_ul = ' + str(s5max_ul)) resfile(\"Bayesian p-value for maximum power P_max = \"", "= float(len([x for x in sim_ksp if x > optpars['ksp']])) / float(len(sim_ksp)) p_merit", "+ str( probs[\"fitpars\"][\"s5maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"s5max\"])) print(\"Bayesian p-value for", "i in range(N): ax = fig.add_subplot(N / 2 + 1, 2, i) n,", "the highest [unsmoothed] data/model outlier: \" + str( probs[x][0]) + \" +/- \"", "+ str(b)] = bmaxpow bindict['sim_bmaxpow' + str(b)] = bmaxpow p_bmaxpow = float(len([x for", "self.ps.ps, lpost, topt=fitpars['popt'], tcov=fitpars['cov'], covfactor=covfactor, niter=niter, nchain=nchain, 
parname=parname, check_conv=True, namestr=self.namestr, use_emcee=True, plot=self.plot, printobj=resfile,", "smoothing for picking out narrow signals # # # class Bayes(object): \"\"\" Bayesian", "Needs to be a function that takes an array of frequencies and k", "sample the posterior distribution for the the simpler model (func1), pick parameter sets", "= int(self.ps.freq[-1] / (2.0 * (self.ps.freq[1] - self.ps.freq[0]))) bins = [1, 3, 5,", "sim_s3max_sort = np.msort(sim_s3max) sim_s5max_sort = np.msort(sim_s5max) sim_s11max_sort = np.msort(sim_s11max) ### note: this is", "p_maxpow, pmaxpow_err], 'maxpow_ul': maxpow_ul, 'p_s3max': [sim_s3max, p_s3max, ps3max_err], 'p_s5max': [sim_s5max, p_s5max, ps5max_err], 'p_s11max':", "+ str(np.mean(sim_lrt))) # print(\"Deviance(obs) = \" + str(fitpars1['deviance'])) # print(\"mean(sim_deviance) = \" +", "+/- \" + str(probs[x][1])) elif x == 'p_deviance': print(\"Bayesian p-value for deviance D", "bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s11max']) / 1.2, max(25, fitpars['s3max']", "because it's really computationally expensive. Parameters ---------- func : function Parametric model for", "normed=True, color=\"cyan\", histtype='stepfilled') plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4, color='navy') plt.savefig(self.namestr + '_lrt.png',", "str(p_deviance) + \" +/- \" + str(pdeviance_err)) print(\"Bayesian p-value for KS test: \"", "+ '_ul'] = bmaxpow_ul resfile('The posterior p-value for the maximum residual power for", "str(pksp_err)) print(\"Bayesian p-value for Merit function: \" + str(p_merit) + \" +/- \"", "most purposes. 
nchain : int, optional, default 10 The number of chains or", "i) n, bins, patches = ax.hist(pars[plotkeys[i]][0], 30) ax.vlines(pars[plotkeys[i]][0], 0.0, 0.8 * max(n), lw=4)", "# # This class defines a Bayes object that can: # - pick", "as the data, and compute the likelihood ratios such that it is possible", "x in sim_ksp if x > optpars['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for", "tests # - find periodicities by picking out the largest power in #", "float(len(sim_lrt)) p_srat = float(len([x for x in sim_srat if x > fitpars1['sobs']])) /", "+ str(probs[\"fitpars\"][\"maxpow\"])) print(\"Bayesian p-value for the highest [unsmoothed] data/model outlier: \" + str(", "str(probs[x][1])) return def write_summary(self, summary, namestr=None): \"\"\" Write a summary of the analysis", "\\n\") file.write(\"---------------------------------------------\\n\") for i in range(len(postpars['postmean'])): file.write(\"theta[\" + str(i) + \"] \\t \"", "\" + str(summary['s3max_ul']) + \"\\n\") elif x == 'p_s5max': file.write(\"Bayesian p-value for the", "'p_s11max': if \"fitpars\" in probs.keys(): print(\"Highest [11 bin smoothed] data/model outlier at frequency", "' + str(ninetyfiveperlim)) bmaxpow_ul = sim_bmaxpow_sort[ninetyfiveperlim] bindict['bmax' + str(b) + '_ul'] = bmaxpow_ul", "xmax, 0.0, max(n)]) plt.vlines(fitpars['s11max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (11) data', fontsize=12) plt.savefig(self.namestr", "be smaller, but it's a good idea to verify that the chains have", "= fitpars['bindict']['bin' + str(b)] bmaxpow = np.array([x[\"bmax\" + str(b)] for x in bindicts])", "+/- \" + str(probs[x][1])) elif x == 'p_s5max': if \"fitpars\" in probs.keys(): print(\"Highest", "= \" + str(p_s3max) + \" +/- \" + str(ps3max_err)) # resfile('Upper limit", "str(probs[x][0]) + \" +/- \" + str( probs[x][1]) + \"\\n\") elif x ==", "is possible to build up a posterior distribution for the likelihood ratios and", "min(min(bins), fitpars['s3max']) / 
1.2, max(25, fitpars['s3max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s3max'],", "power in that periodogram. Create a posterior distribution of maximum powers and compute", "= np.msort(sim_s11max) ### note: this is the limit for 2*I/S --> multiply by", "Cannot compute autocorrelation times for the parameters \\n\") for i, x in enumerate(postpars[\"rhat\"]):", "sim_optpars.append(soptpars) sim_qpopars.append(sqpopars) sim_deviance.append(soptpars['deviance']) sim_ksp.append(soptpars['ksp']) sim_merit.append(soptpars['merit']) sim_srat.append(soptpars['sobs']) except KeyboardInterrupt: break ### Step 5: Compute", "print(\"Bayesian p-value for deviance D = \" + str(p_deviance) + \" +/- \"", "for i in range(len(postpars['postmean'])): file.write(\"theta[\" + str(i) + \"] \\t \" + str(postpars['postmean'][i])", "+/- \" + str(probs[x][1]) + \"\\n\") elif x == 'p_ksp': file.write(\"Bayesian p-value for", "+ str(x) + \"\\n\") ### print posterior summary of parameters: file.write(\"-- Posterior Summary", "= \" + str(obslrt)) # print(\"mean(sim_lrt) = \" + str(np.mean(sim_lrt))) # print(\"Deviance(obs) =", "class Bayes(object): \"\"\" Bayesian time series analysis This class defines a Bayes object", "resulting residuals should follow a chi-square distribution with two degrees of freedom. 
Find", "bindict['p_maxpow' + str(b) + 'err'] = bmaxpow_err sim_bmaxpow_sort = np.msort(bmaxpow) ### note: this", "\" + str(np.mean(sim_srat))) ### Step 6: Compute errors of Bayesian posterior probabilities pdeviance_err", "+ str(probs[x][1])) elif x == 'p_merit': print(\"Bayesian p-value for Merit function: \" +", "a posterior distribution of maximum powers and compute a posterior predictive p-value of", "sim_deviance.append(sim_pars1['deviance']) sim_ksp.append(sim_pars1['ksp']) sim_maxpow.append(sim_pars1['maxpow']) sim_merit.append(sim_pars1['merit']) sim_fpeak.append(sim_pars1['maxfreq']) sim_y0.append(sim_pars1['mfit'][sim_pars1['maxind']]) sim_srat.append(sim_pars1['sobs']) except KeyboardInterrupt: break if len(sim_maxpow) ==", ": {list, array-like} Input guesses for the MAP fit using func2. The number", "- p_lrt) / float(len(sim_ksp))) psrat_err = np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp)))", "\" +/- \" + str(probs[x][1])) elif x == 'p_srat': print(\"Bayesian p-value for the", "Markov chains. For Metropolis-Hastings, this needs to be large (>10000) For emcee, this", "else: postpars[x] = summary[x] print(\"The ensemble acceptance rate is \" + str(postpars[\"acceptance\"]) +", "and func2 differ in complexity, the less complex should be func1. Then sample", "import MaxNLocator try: import cPickle as pickle except ImportError: import pickle import copy", "Write a summary of the analysis to file. NOT USED! :param summary: :param", "+ ' failed! 
Returning ...') # return psfit, fakeper, mcobs sim_pars1 = getattr(fitfake,", "names for plotting noise: int, optional, default -1 The index for the noise", "\" + str(fitpars1['deviance'])) # print(\"mean(sim_deviance) = \" + str(np.mean(sim_deviance))) print(\"KSP(obs) = \" +", "= mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) fitpars = psfit.mlest(func, par, obs=True, noise=noise, m=self.m) bindict =", "a posterior predictive p-value of seeing the maximum power in the data under", "in sim_lrt, sim_optpars, sim_qpopars, sim_deviance, sim_ksp, sim_merit, sim_srat = [], [], [], [],", "def find_periodicity(self, func, par, fitmethod='bfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, parname=None, noise=-1, use_emcee=True, searchfreq=None):", "percent limit reliably!') fiveperlim = 1 ninetyfiveperlim = len(sim_maxpow) - fiveperlim # print('popt4:", "str(probs[\"fitpars\"][\"maxpow\"])) print(\"Bayesian p-value for the highest [unsmoothed] data/model outlier: \" + str( probs[x][0])", "== 'p_s5max': if \"fitpars\" in probs.keys(): print(\"Highest [5 bin smoothed] data/model outlier at", "+ str(p_srat) + \" +/- \" + str(psrat_err)) resfile(\"Bayesian p-value for Likelihood Ratio:", "funcname = str(func).split()[1] # print(\"<< --- len(self.ps beginning): \" + str(len(self.ps.ps))) ### step", "nchain=10, niter=5000, nsim=1000, covfactor=1.0, use_emcee=True, parname=None, noise1=-1, noise2=-1, writefile=True): \"\"\" Fit two models", "sim_pars2, namestr=self.namestr+'_'+str(i)) sim_lrt.append(lrt) sim_deviance.append(sim_pars1['deviance']) sim_ksp.append(sim_pars1['ksp']) sim_maxpow.append(sim_pars1['maxpow']) sim_merit.append(sim_pars1['merit']) sim_fpeak.append(sim_pars1['maxfreq']) sim_y0.append(sim_pars1['mfit'][sim_pars1['maxind']]) sim_srat.append(sim_pars1['sobs']) except KeyboardInterrupt: break", "[p_ksp, pksp_err], \"p_merit\": [p_merit, pmerit_err], \"p_srat\": [p_srat, psrat_err], \"postmean\": mcobs.mean, \"posterr\": mcobs.std, 
\"postquantiles\":", "result of averaging several individual periodograms (or bins), this changes the statistical distributions.", "of simulations to use when computing the posterior distribution of the likelihood ratio.", "fitpars = psfit.mlest(func, ain, obs=True, noise=-1, m=self.m) # print(\"<< --- len(self.ps beginning): \"", "= ' + str(bmaxpow_ul)) ### now turn upper limit into an rms amplitude:", "\" + str(p_srat) + \" +/- \" + str(psrat_err)) if self.plot: plt.subplot(2, 2,", "plot=self.plot, m=self.m) ### find optimum QPO values for the real data obslrt, optpars,", "noise=noise, m=self.m) bindict = fitpars['bindict'] # print('popt: ' + str(fitpars['popt'])) ## which posterior", "x in sim_deviance if x > fitpars1['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x for", "\"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return results def find_qpo(self, func, ain, fitmethod='constbfgs',", "' + str(fitpars1['sobs'])) resfile(\"p(LRT) = \" + str(p_lrt)) resfile(\"KSP(obs) = \" + str(fitpars1['ksp']))", "\" +/- \" + str(ps5max_err)) # resfile('Upper limit on maximum signal power P_max_ul", "by S to get powers for each frequency ### Like everything else, this", "find the highest power in that periodogram. Create a posterior distribution of maximum", "compute a posterior predictive p-value of seeing the maximum power in the data", "power in # an observation/set of fake periodograms # - search for QPOs", "func1. func2 : function Parametric model for the periodogram. Needs to be a", "if self.m == 1: lpost = posterior.PerPosterior(self.ps, func1) else: lpost = posterior.StackPerPosterior(self.ps, func1,", "sort out p-values and posterior distribution of parameters for x in keys: if", "len(sim_maxpow) == 0: resfile(\"Analysis of Burst failed! 
Returning ...\") return False, False, False", "use_emcee : boolean, optional, default True If True (STRONGLY RECOMMENDED), use the emcee", "for the sum of residuals: \" + str(probs[x][0]) + \" +/- \" +", "outlier: \" + str(summary['s3max_ul']) + \"\\n\") elif x == 'p_s5max': file.write(\"Bayesian p-value for", "str( self.ps.df * b) + 'Hz is p = ' + str(p_bmaxpow) +", "print(\"---------------------------------------------\\n\") for i in range(len(postpars['postmean'])): print(\"theta[\" + str(i) + \"] \\t \" +", "saving output (text files and plots) plot: boolean, optional, default True If True,", "niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=True, plot=self.plot, printobj=resfile, m=self.m) ### Step 3: create", "break # except: # print(\"Simulation failed! Continuing ...\") # continue # print('popt' +", "-1. \"\"\" resfilename = self.namestr + \"_choosenoisemodel.dat\" resfile = utils.TwoPrint(resfilename) ### make strings", "Returning ...\") return False, False, False else: ### Step 5: Compute Bayesian posterior", "if x > fitpars['sobs']])) / float(len(sim_srat)) p_s3max = float(len([x for x in sim_s3max", "numpy as np from src.SpectralAnalysis import utils from src.SpectralAnalysis import powerspectrum from src.SpectralAnalysis", "0.05 ### i.e. when only 0.05*nsim simulations are higher than this ### note:", "# print(\"mean(sim_lrt) = \" + str(np.mean(sim_lrt))) # print(\"Deviance(obs) = \" + str(fitpars1['deviance'])) #", "sim_qpopars, sim_deviance, sim_ksp, sim_merit, sim_srat = [], [], [], [], [], [], []", "the file name where the output will be stored resfilename = self.namestr +", "= len(plotkeys) ### number of parameters fig = plt.figure(figsize=(2, N / 2 +", "equal the number of parameters k taken by func1. 
func2 : function Parametric", "residuals: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x ==", "[unsmoothed] data/model outlier: \" + str(summary['s5max_ul']) + \"\\n\") elif x == 'p_s11max': file.write(\"Bayesian", "If False, use Metropolis-Hastings. parname : list, optional, default None Include a list", "the likelihood ratios and compute a posterior predictive p-value that the data can", "maximum residual power for a binning of ' + str( self.ps.df * b)", "(func1), pick parameter sets from the posterior to create fake periodograms. Fit each", "Fit fake periodograms and read out parameters of interest from each fit: for", "else, this is n-trial corrected! maxpow_ul = sim_maxpow_sort[ninetyfiveperlim] ### Step 6: Compute errors", "2, 4) n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax", "samples of the posterior. For each fake periodogram, find the MAP estimate, divide", "in sim_ksp if x > optpars['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for x", "file.write(\"theta[\" + str(i) + \"] \\t \" + str(postpars['postmean'][i]) + \"\\t\" + str(", "float(len(sim_ksp))) pmerit_err = np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp))) psrat_err = np.sqrt(p_srat", "plt.close() summary = {\"p_lrt\": [p_lrt, plrt_err], \"p_maxpow\": [p_maxpow, pmaxpow_err], \"p_deviance\": [p_deviance, pdeviance_err], \"p_ksp\":", "\" is \" + str(x) + \"\\n\") ### print posterior summary of parameters:", "len(self.ps beginning): \" + str(len(self.ps.ps))) ### step 1: fit model to observation psfit", "__init__(self, ps, namestr='test', plot=True, m=1): assert isinstance(ps, powerspectrum.PowerSpectrum), \"ps must be of type", "plotkeys = pars.keys() N = len(plotkeys) ### number of parameters fig = plt.figure(figsize=(2,", "import mle from src.SpectralAnalysis import posterior ########################################## # # class Bayes: Bayesian data", "predictive p-value that the data can be explained 
sufficiently with the simpler model.", "of elements *must* equal the number of parameters k taken by func1. func2", "\" +/- \" + str(pksp_err)) print(\"Bayesian p-value for Merit function: \" + str(p_merit)", "x > fitpars1['merit']])) / float(len(sim_merit)) p_lrt = float(len([x for x in sim_lrt if", "a Bayes object that can: # - pick between two models using likelihood", "in keys: if x[:2] == 'p_': probs[x] = summary[x] else: postpars[x] = summary[x]", "simulate lots of realizations of the broadband noise model from MCMCs funcfake =", "fontsize=12) plt.subplot(2, 2, 4) n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled')", "print(\"The ensemble acceptance rate is \" + str(postpars[\"acceptance\"]) + \" .\") try: print(\"The", "for x in sim_ksp if x > fitpars1['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x", "probs[\"fitpars\"][\"s11maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"s11max\"])) print(\"Bayesian p-value for the highest", "needs to be large (>10000) For emcee, this can be smaller, but it's", "pksp_err], \"p_merit\": [p_merit, pmerit_err], \"p_srat\": [p_srat, psrat_err], \"postmean\": mcobs.mean, \"posterr\": mcobs.std, \"postquantiles\": mcobs.ci,", "[] ### Step 4: Fit fake periodograms and read out parameters of interest", "print('popt: ' + str(fitpars['popt'])) ## which posterior do I need to use? if", "+ '_loglikes') ### simulate lots of realizations of the broadband noise model from", "True (STRONGLY RECOMMENDED), use the emcee package for running MCMC. If False, use", "p-value that the data can be explained sufficiently with the simpler model. Parameters", "7, 10, 15, 20, 30, 50, 70, 100, 200, 300, 500, 700, 1000]", "create fake periodograms out of MCMCs fakeper = mcobs.simulate_periodogram(nsim=nsim) ### empty lists for", "str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_maxpow': if \"fitpars\"", "par2 : {list, array-like} Input guesses for the MAP fit using func2. 
The", "file = open(namestr + \"_summary.dat\", \"w\") file.write(\"The ensemble acceptance rate is \" +", "fitpars['maxpow'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['maxpow'], 0.0, max(n), lw=2, color='navy') plt.title('unsmoothed", "\" + str(postpars[\"acceptance\"]) + \" .\\n\") try: file.write(\"The autocorrelation times are: \" +", "# an observation/set of fake periodograms # - search for QPOs via a", "Chain Monte Carlo Simulations ### of model 1: mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost,", "0.0, 0.8 * max(n), lw=4, color='navy') plt.savefig(self.namestr + '_lrt.png', format='png') plt.close() summary =", "mixed. nsim : int, optional, default 1000 The number of simulations to use", "self.plot = plot self.m = m def choose_noise_model(self, func1, par1, func2, par2, fitmethod='bfgs',", "[], [], [], [], [], [] bmax = int(self.ps.freq[-1] / (2.0 * (self.ps.freq[1]", "if x[:2] == 'p_': probs[x] = summary[x] else: postpars[x] = summary[x] picklefile =", "+ str(postpars[\"acor\"]) + \"\\n\") except KeyError: file.write(\"Module Acor not found. 
Cannot compute autocorrelation", "**pars): plotkeys = pars.keys() N = len(plotkeys) ### number of parameters fig =", "be used to identify this periodogram when saving output (text files and plots)", "the posterior distribution for the the simpler model (func1), pick parameter sets from", "function that takes an array of frequencies and n parameters, and returns an", "default 1000 The number of simulations to use when computing the posterior distribution", "namestr=self.namestr+'_'+str(i)) sim_lrt.append(lrt) sim_deviance.append(sim_pars1['deviance']) sim_ksp.append(sim_pars1['ksp']) sim_maxpow.append(sim_pars1['maxpow']) sim_merit.append(sim_pars1['merit']) sim_fpeak.append(sim_pars1['maxfreq']) sim_y0.append(sim_pars1['mfit'][sim_pars1['maxind']]) sim_srat.append(sim_pars1['sobs']) except KeyboardInterrupt: break if", "print('len(binpowers): ' + str(len(binpowers))) if searchfreq is None: searchfreq = [40.0, 70.0, 100.0,", "\" + str(probs[x][1])) elif x == 'p_srat': print(\"Bayesian p-value for the sum of", "periodogram with the same models as the data, and compute the likelihood ratios", "if lrt > 20: # fitfake.plotfits(sim_pars1, sim_pars2, namestr=self.namestr+'_'+str(i)) sim_lrt.append(lrt) sim_deviance.append(sim_pars1['deviance']) sim_ksp.append(sim_pars1['ksp']) sim_maxpow.append(sim_pars1['maxpow']) sim_merit.append(sim_pars1['merit'])", "== 'p_maxpow': file.write(\"Bayesian p-value for the highest [unsmoothed] data/model outlier: \" + str(", "ImportError: import pickle import copy import numpy as np from src.SpectralAnalysis import utils", "if self.plot: plt.subplot(2, 2, 1) n, bins, patches = plt.hist(sim_maxpow, bins=100, normed=True, color=\"cyan\",", "open(namestr + \"_summary_pickle.dat\", \"w\") pickle.dump(summary, picklefile) picklefile.close() file = open(namestr + \"_summary.dat\", \"w\")", "sim_srat if x > fitpars['sobs']])) / float(len(sim_srat)) p_s3max = float(len([x for x in", "\" + str(p_deviance) + \" +/- \" + str(pdeviance_err)) 
resfile(\"Bayesian p-value for KS", "namestr=self.namestr, use_emcee=True, plot=self.plot, printobj=resfile, m=self.m) ### Step 3: create fake periodograms out of", "is *always* -1. use_emcee : boolean, optional, default True If True (STRONGLY RECOMMENDED),", "str(psrat_err)) resfile(\"Bayesian p-value for Likelihood Ratio: \" + str(p_lrt) + \" +/- \"", "posterior distribution of maximum powers and compute a posterior predictive p-value of seeing", "outlier: \" + str( probs[x][0]) + \" +/- \" + str(probs[x][1]) + \"\\n\")", "searchfreq=None): \"\"\" Find periodicities in observed data and compute significance via MCMCs. First,", "+ \"\\n\") file.write( \"Upper limit for highest [unsmoothed] data/model outlier: \" + str(summary['s3max_ul'])", "residuals: \" + str(p_srat) + \" +/- \" + str(psrat_err)) print(\"Bayesian p-value for", "str(b)]) + ' is 2I/S = ' + str(fitpars['bindict'][\"bmax\" + str(b)])) resfile('The upper", "np.sqrt(p_bmaxpow * (1.0 - p_bmaxpow) / float(len(bmaxpow))) bindict['p_maxpow' + str(b) + 'err'] =", "func1name + 'fit') fitpars2 = getattr(psfit, func2name + 'fit') if self.plot: ### plot", "from matplotlib.ticker import MaxNLocator try: import cPickle as pickle except ImportError: import pickle", "a QPO + background model to *every* frequency. 
NOTE: I rarely ever use", "and best fit models psfit.plotfits(fitpars1, fitpars2, namestr=self.namestr, log=True) if self.m == 1: lpost", "+ \" +/- \" + str(pmaxpow_err)) # resfile('Upper limit on maximum signal power", "sim_merit, sim_srat = [], [], [], [], [], [], [] simno = 0", "'leahy': binpowers = binpowers / (self.ps.df * b * self.ps.nphots) elif self.ps.norm ==", "fitpars['s3max']])) / float(len(sim_s3max)) p_s5max = float(len([x for x in sim_s5max if x >", "in sim_srat if x > fitpars['sobs']])) / float(len(sim_srat)) p_s3max = float(len([x for x", "if x > fitpars1['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x for x in sim_ksp", "The index for the noise parameter in func1 and func2. In the pre-defined", "data', fontsize=12) plt.savefig(self.namestr + '_maxpow.png', format='png') plt.close() results = {\"fitpars\": fitpars, 'bindict': bindict,", "data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"maxfreq\"]) + \"Hz with power P=\"", "package for running MCMC. If False, use Metropolis-Hastings. parname : list, optional, default", "fit using func2. The number of elements *must* equal the number of parameters", "highest power in the residuals and its frequency. 
Sample the posterior distribution of", "raise Exception(\"Summary must be a dictionary!\") probs = dict() postpars = dict() ###", "for r in fitpars[\"bindict\"].keys()] nbins = len(binlist) / 4 sain = copy.copy(fitpars['popt']) #", "plot: boolean, optional, default True If True, several diagnostic plots will be saved", "brms else: continue ### Step 5: Compute Bayesian posterior probabilities of individual quantities", "lowest to highest sim_maxpow_sort = np.msort(sim_maxpow) sim_s3max_sort = np.msort(sim_s3max) sim_s5max_sort = np.msort(sim_s5max) sim_s11max_sort", "= float(len([x for x in sim_ksp if x > fitpars1['ksp']])) / float(len(sim_ksp)) p_merit", "interest from each fit: for i, x in enumerate(fakeper): try: fitfake = mle.PerMaxLike(x,", "plot=self.plot, printobj=resfile, m=self.m) ### Step 3: create fake periodograms out of MCMCs fakeper", "matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator try: import cPickle as pickle except", "distribution for the the simpler model (func1), pick parameter sets from the posterior", "' + str(b) + ' is rms = ' + str(brms)) bindict['bin' +", "\" +/- \" + str(psrat_err)) print(\"Bayesian p-value for Likelihood Ratio: \" + str(p_lrt)", "\\t\\t sd \\t\\t 5% \\t\\t 95% \\n\") print(\"---------------------------------------------\\n\") for i in range(len(postpars['postmean'])): print(\"theta[\"", "QPOs via a model selection approach using LRTs Parameters ---------- ps : powerspectrum.Powerspectrum", "parameters and associated quantities fitpars1 = getattr(psfit, func1name + 'fit') fitpars2 = getattr(psfit,", "enumerate(fakeper): try: # print('popt' + str(i) + 'a : ' + str(fitpars['popt'])) fitfake", "power P_max_ul = ' + str(s5max_ul)) resfile(\"Bayesian p-value for maximum power P_max =", "= min(min(bins), fitpars['s3max']) / 1.2, max(25, fitpars['s3max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)])", "of ' + str(b) + ' is P = ' + str(bpow *", "+ str(np.mean(sim_srat))) ### Step 6: Compute errors of Bayesian 
posterior probabilities pdeviance_err =", "rms amplitude: ## first compute broadband noise model for binned frequencies bintemplate =", "+ str(probs[x][1])) elif x == 'p_s3max': if \"fitpars\" in probs.keys(): print(\"Highest [3 bin", "use_emcee=True, searchfreq=None): \"\"\" Find periodicities in observed data and compute significance via MCMCs.", "print('len(bmaxpow_sort) : ' + str(len(sim_bmaxpow_sort))) resfile('ninetyfiveperlim: ' + str(ninetyfiveperlim)) bmaxpow_ul = sim_bmaxpow_sort[ninetyfiveperlim] bindict['bmax'", "broadband noise model for binned frequencies bintemplate = func(fitpars['bindict']['bin' + str(b)].freq, *fitpars['popt']) resfile(\"bintemplate[0]:", "posterior do I need to use? if self.m == 1: lpost = posterior.PerPosterior(self.ps,", "optional, default True If True (STRONGLY RECOMMENDED), use the emcee package for running", "ain, obs=False, plot=True, plotname=plotstr + '_sim' + str(simno) + '_qposearch') sim_lrt.append(slrt) sim_optpars.append(soptpars) sim_qpopars.append(sqpopars)", "= np.msort(sim_maxpow) sim_s3max_sort = np.msort(sim_s3max) sim_s5max_sort = np.msort(sim_s5max) sim_s11max_sort = np.msort(sim_s11max) ### note:", "str(np.mean(sim_srat))) ### Step 6: Compute errors of Bayesian posterior probabilities pmaxpow_err = np.sqrt(p_maxpow", "simulations, the p-value can be constrained only to 0.001). covfactor : float, optional,", "[], [], [], [], [] simno = 0 ### run QPO search on", "selection approach using LRTs Parameters ---------- ps : powerspectrum.Powerspectrum A periodogram object that", "x in sim_deviance if x > optpars['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x for", "+ str(i) + ' failed! 
Returning ...') # return psfit, fakeper, mcobs sim_pars1", "for picking out narrow signals # # # class Bayes(object): \"\"\" Bayesian time", "print('popt' + str(i) + 'b : ' + str(fitpars['popt'])) sim_pars = fitfake.mlest(func, sain,", "lrt = fitfake.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m) # resfile('Fitting of fake", "equal the number of parameters n taken by func2. fitmethod : string, optional,", "out the MAP model and find the highest power in that periodogram. Create", "parameters, and returns an array of model powers The function should include a", "= fitpars['bindict'] # print('popt: ' + str(fitpars['popt'])) ## which posterior do I need", "+ 'fit') if self.plot: ### plot the periodogram and best fit models psfit.plotfits(fitpars1,", "be func1. Then sample the posterior distribution for the the simpler model (func1),", "func2. In the pre-defined models, this index is *always* -1. \"\"\" resfilename =", "\" + str(p_srat) + \" +/- \" + str(psrat_err)) print(\"Bayesian p-value for Likelihood", "type powerspectrum.PowerSpectrum!\" self.ps = ps self.namestr = namestr self.plot = plot self.m =", "probs[x] = summary[x] else: postpars[x] = summary[x] print(\"The ensemble acceptance rate is \"", "x == 'p_s11max': file.write(\"Bayesian p-value for the highest [11 bin smoothed] data/model outlier:", "make strings for function names from function definition func1name = \"model1\" func2name =", "matplotlib.ticker import MaxNLocator try: import cPickle as pickle except ImportError: import pickle import", "highest power in that periodogram. 
Create a posterior distribution of maximum powers and", "sain = copy.copy(fitpars['popt']) # print('popt2: ' + str(fitpars['popt'])) ### Step 4: Fit fake", "max(n), lw=2, color='navy') plt.title('unsmoothed data', fontsize=12) plt.subplot(2, 2, 2) n, bins, patches =", "'p_': probs[x] = summary[x] else: postpars[x] = summary[x] print(\"The ensemble acceptance rate is", "a posterior predictive p-value that the data can be explained sufficiently with the", "fakeper = mcobs.simulate_periodogram(nsim=nsim) ### empty lists for simulated quantities of interest: sim_lrt, sim_deviance,", "fiveperlim == 0: resfile('Warning! Too few simulations to compute five percent limit reliably!')", "= np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp))) ps3max_err = np.sqrt(p_s3max * (1.0", "import powerspectrum from src.SpectralAnalysis import mcmc from src.SpectralAnalysis import mle from src.SpectralAnalysis import", "find_periodicity(self, func, par, fitmethod='bfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, parname=None, noise=-1, use_emcee=True, searchfreq=None): \"\"\"", "results. NOT USED! \"\"\" try: keys = summary.keys() except AttributeError: raise Exception(\"Summary must", "5% \\t\\t 95% \\n\") file.write(\"---------------------------------------------\\n\") for i in range(len(postpars['postmean'])): file.write(\"theta[\" + str(i) +", "parameter for the MCMC step. Used only in Metropolis-Hastings. 
use_emcee : boolean, optional,", "100.0, 300.0, 500.0, 1000.0] ## for 40 Hz: print(searchfreq) for bc in searchfreq:", "\" + str(ps11max_err)) # resfile('Upper limit on maximum signal power P_max_ul = '", "\" + str(summary['s5max_ul']) + \"\\n\") elif x == 'p_s11max': file.write(\"Bayesian p-value for the", "- p_merit) / float(len(sim_ksp))) psrat_err = np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp)))", "printobj=resfile, m=self.m) ### Step 3: create fake periodograms out of MCMCs fakeper =", "p_srat = float(len([x for x in sim_srat if x > fitpars1['sobs']])) / float(len(sim_srat))", "plt.title('unsmoothed data', fontsize=12) plt.subplot(2, 2, 2) n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True,", "k taken by func1. func2 : function Parametric model for the periodogram. Needs", "\"model2\" ### step 1: fit both models to observation and compute LRT psfit", "the likelihood ratio at the maximum-a-posteriori paramters. If func1 and func2 differ in", "searched for QPOs namestr: string, optional, default \"test\" The string that will be", "* (1.0 - p_srat) / float(len(sim_ksp))) ps3max_err = np.sqrt(p_s3max * (1.0 - p_s3max)", "p_s5max) / float(len(sim_ksp))) ps11max_err = np.sqrt(p_s11max * (1.0 - p_s11max) / float(len(sim_ksp))) ###", "str(postpars['postmean'][i]) + \"\\t\" + str( postpars['posterr'][i]) + \"\\t\" + str(postpars['postquantiles'][i][0]) + \"\\t\" +", "observation/set of fake periodograms - search for QPOs via a model selection approach", "+ \"\\t\" + str( postpars[\"postquantiles\"][i][1]) + \"\\n\") for x in probs.keys(): if x", "print('popt' + str(i) + 'c : ' + str(fitpars['popt'])) sim_pars_all.append(sim_pars) sim_deviance.append(sim_pars['deviance']) sim_ksp.append(sim_pars['ksp']) sim_maxpow.append(sim_pars['maxpow'])", "str(bmaxpow_err)) resfile('The corresponding value of the T_R statistic at frequency f = '", "\\n\") print(\"---------------------------------------------\\n\") for i in range(len(postpars['postmean'])): 
print(\"theta[\" + str(i) + \"] \\t \"", "+ \" +/- \" + str(probs[x][1]) + \"\\n\") elif x == 'p_srat': file.write(\"Bayesian", "# - search for QPOs via a model selection approach using LRTs #", "str(fitpars1['merit'])) resfile(\"mean(sim_merit) = \" + str(np.mean(sim_merit))) resfile(\"Srat(obs) = \" + str(fitpars1['sobs'])) resfile(\"mean(sim_srat) =", "*must* equal the number of parameters n taken by func2. fitmethod : string,", "5000 Sets the length of the Markov chains. For Metropolis-Hastings, this needs to", "fake periodograms out of MCMCs fakeper = mcobs.simulate_periodogram(nsim=nsim) ### empty lists for simulated", "float(len(sim_maxpow)) p_deviance = float(len([x for x in sim_deviance if x > fitpars1['deviance']])) /", "bins, patches = plt.hist(sim_lrt, bins=100, normed=True, histtype='stepfilled') plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4,", "posterior distribution of the likelihood ratio. Note that this also sets the maximum", "optional, default None Include a list of strings here to set parameter names", "# resfile('Upper limit on maximum signal power P_max_ul = ' + str(maxpow_ul)) resfile(\"Bayesian", "print(searchfreq) for bc in searchfreq: if bc > (binps.freq[1] - binps.freq[0]): bind =", "For emcee, use as many as you can afford (~500) and fewer samples", "str(probs[\"fitpars\"][\"s5max\"])) print(\"Bayesian p-value for the highest [5 bin smoothed] data/model outlier: \" +", "analysis for time series # # This class defines a Bayes object that", "precision of the posterior predictive p-value (for 1000 simulations, the p-value can be", "== 'p_srat': print(\"Bayesian p-value for the sum of residuals: \" + str(probs[x][0]) +", "\"_choosenoisemodel.dat\" resfile = utils.TwoPrint(resfilename) ### make strings for function names from function definition", "mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) fitpars = psfit.mlest(func, ain, obs=True, noise=-1, m=self.m) # print(\"<< ---", "srat: ' + str(fitpars1['sobs'])) resfile(\"p(LRT) = \" + 
str(p_lrt)) resfile(\"KSP(obs) = \" +", "Fit fake periodograms: for i, x in enumerate(fakeper): try: # print('popt' + str(i)", "print(\"Simulation failed! Continuing ...\") # continue # print('popt' + str(i) + 'd :", "obs=True) obslrt = psfit.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m) ### get out", "searchfreq: if bc > (binps.freq[1] - binps.freq[0]): bind = np.searchsorted(binps.freq, bc) - 1", "bindicts]) maxpows_all[\"bin\" + str(b)] = bmaxpow bindict['sim_bmaxpow' + str(b)] = bmaxpow p_bmaxpow =", "str( postpars[\"postquantiles\"][i][1]) + \"\\n\") for x in probs.keys(): if x == 'p_lrt': file.write(", "compute the likelihood ratios such that it is possible to build up a", "fitmethod=fitmethod, obs=True) fitpars = psfit.mlest(func, par, obs=True, noise=noise, m=self.m) bindict = fitpars['bindict'] #", "srat: ' + str(sim_srat)) resfile('observed srat: ' + str(fitpars1['sobs'])) resfile(\"p(LRT) = \" +", "upper limits for powers I_j depending on frequency binpowers = bmaxpow_ul * bintemplate", "+ str(s5max_ul)) resfile(\"Bayesian p-value for maximum power P_max = \" + str(p_s11max) +", "it is possible to build up a posterior distribution for the likelihood ratios", "np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp))) plrt_err = np.sqrt(p_lrt * (1.0 -", "for x in sim_merit if x > optpars['merit']])) / float(len(sim_merit)) p_lrt = float(len([x", "+/- \" + str(pmaxpow_err)) # resfile('Upper limit on maximum signal power P_max_ul =", "array-like} Input guesses for the MAP fit using func2. 
The number of elements", "max(n)]) plt.vlines(fitpars['s3max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (3) data', fontsize=12) plt.subplot(2, 2, 3)", "str(p_lrt)) # print(\"LRT(obs) = \" + str(obslrt)) # print(\"mean(sim_lrt) = \" + str(np.mean(sim_lrt)))", "### Step 5: Compute Bayesian posterior probabilities of individual quantities p_deviance = float(len([x", "the highest [3 bin smoothed] data/model outlier: \" + str( probs[x][0]) + \"", "src.SpectralAnalysis import posterior ########################################## # # class Bayes: Bayesian data analysis for time", "array of frequencies and n parameters, and returns an array of model powers", "compute LRT psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) obslrt = psfit.compute_lrt(func1, par1, func2, par2,", "(11) data', fontsize=12) plt.savefig(self.namestr + '_maxpow.png', format='png') plt.close() results = {\"fitpars\": fitpars, 'bindict':", "x in probs.keys(): if x == 'p_lrt': print(\"Bayesian p-value for Likelihood Ratio: \"", "bins=100, normed=True, color=\"cyan\", histtype='stepfilled') plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4, color='navy') plt.savefig(self.namestr +", "+ \"Hz with power P=\" + str(probs[\"fitpars\"][\"s5max\"])) print(\"Bayesian p-value for the highest [5", "posterior predictive p-value that the data can be explained sufficiently with the simpler", "\" + str(p_lrt)) resfile(\"KSP(obs) = \" + str(fitpars1['ksp'])) resfile(\"mean(sim_ksp) = \" + str(np.mean(sim_ksp)))", "the power at ' + str(bc) + 'Hz for a binning of '", "bmaxpow_ul = sim_bmaxpow_sort[ninetyfiveperlim] bindict['bmax' + str(b) + '_ul'] = bmaxpow_ul resfile('The posterior p-value", "== 'p_maxpow': if \"fitpars\" in probs.keys(): print(\"Highest [unsmoothed] data/model outlier at frequency F=\"", "mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost, topt=fitpars1['popt'], tcov=fitpars1['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True, 
namestr=self.namestr, use_emcee=use_emcee, plot=self.plot,", "300.0, 500.0, 1000.0] ## for 40 Hz: print(searchfreq) for bc in searchfreq: if", "divide out the MAP model and find the highest power in that periodogram.", "p-value for the maximum residual power for a binning of ' + str(", "be last! par2 : {list, array-like} Input guesses for the MAP fit using", "func. In the pre-defined models, this index is *always* -1. use_emcee : boolean,", "a parameter setting a constant background level, and this parameter should be last!", "/ float(len(sim_srat)) resfile('simulated srat: ' + str(sim_srat)) resfile('observed srat: ' + str(fitpars1['sobs'])) resfile(\"p(LRT)", "fitpars1['sobs']])) / float(len(sim_srat)) resfile('simulated srat: ' + str(sim_srat)) resfile('observed srat: ' + str(fitpars1['sobs']))", "mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost, topt=fitpars['popt'], tcov=fitpars['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=True, plot=self.plot,", "= \" + str(p_lrt)) # print(\"LRT(obs) = \" + str(obslrt)) # print(\"mean(sim_lrt) =", "x == 'p_s3max': if \"fitpars\" in probs.keys(): print(\"Highest [3 bin smoothed] data/model outlier", "simpler model. Parameters ---------- func1 : function Parametric model for the periodogram. Needs", "class defines a Bayes object that can: - pick between two models using", "of seeing the maximum power in the data under the null hypothesis (no", "float(len(sim_deviance)) p_ksp = float(len([x for x in sim_ksp if x > fitpars1['ksp']])) /", "covfactor : float, optional, default 1.0 A tuning parameter for the MCMC step.", "\" +/- \" + str(probs[x][1])) elif x == 'p_ksp': print(\"Bayesian p-value for KS", "expensive. Parameters ---------- func : function Parametric model for the periodogram. Needs to", "fake periodogram ' + str(i) + ' failed! 
Returning ...') # return psfit,", "/ float(len(sim_ksp))) ps11max_err = np.sqrt(p_s11max * (1.0 - p_s11max) / float(len(sim_ksp))) ### Display", "> fitpars['s3max']])) / float(len(sim_s3max)) p_s5max = float(len([x for x in sim_s5max if x", "a summary of the analysis to file. NOT USED! :param summary: :param namestr:", "str(bpow * (self.ps.df * b * self.ps.nphots))) resfile('The upper limit on the rms", "* (1.0 - p_bmaxpow) / float(len(bmaxpow))) bindict['p_maxpow' + str(b) + 'err'] = bmaxpow_err", "print('popt3: ' + str(fitpars['popt'])) ### upper limit is the power in the sorted", "* 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s3max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (3)", "running MCMC. If False, use Metropolis-Hastings. \"\"\" ## the file name where the", "of the analysis to file. NOT USED! :param summary: :param namestr: :return: \"\"\"", "a summary of the results. NOT USED! \"\"\" try: keys = summary.keys() except", "number of elements in this list or array must match the number of", "fit, the resulting residuals should follow a chi-square distribution with two degrees of", "MCMCs fakeper = mcobs.simulate_periodogram(nsim=nsim) sim_pars_all, sim_deviance, sim_ksp, sim_fpeak, sim_srat, \\ sim_maxpow, sim_merit, sim_y0,", "limit on the power at ' + str(bc) + 'Hz for a binning", "x > fitpars1['sobs']])) / float(len(sim_srat)) resfile('simulated srat: ' + str(sim_srat)) resfile('observed srat: '", "the output log file resfile = utils.TwoPrint(resfilename) ### step 1: fit model to", "+ str(probs[x][1])) elif x == 'p_maxpow': if \"fitpars\" in probs.keys(): print(\"Highest [unsmoothed] data/model", "+ \"_findperiodicity_results.dat\" ## open the output log file resfile = utils.TwoPrint(resfilename) ### step", "= min(min(bins), fitpars['s5max']) / 1.2, max(25, fitpars['s5max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)])", "' + str(len(binpowers))) if searchfreq is None: searchfreq = [40.0, 70.0, 100.0, 300.0,", "can: # - pick 
between two models using likelihood ratio tests # -", "The number of simulations to use when computing the posterior distribution of the", "names for plotting noise1, noise2 : int, optional, default -1 The index for", "= [x[\"bindict\"] for x in sim_pars_all] ### get out binned powers: maxpows_all =", "float(len([x for x in sim_lrt if x > obslrt])) / float(len(sim_lrt)) p_srat =", "x in sim_srat if x > optpars['sobs']])) / float(len(sim_srat)) print(\"p(LRT) = \" +", "distribution Attributes ---------- Examples -------- \"\"\" def __init__(self, ps, namestr='test', plot=True, m=1): assert", "str(i) + 'd : ' + str(fitpars['popt'])) # print('popt3: ' + str(fitpars['popt'])) ###", "and fewer samples niter : int, optional, default 5000 Sets the length of", "/ float(len(sim_merit)) p_lrt = float(len([x for x in sim_lrt if x > obslrt]))", "function: \" + str(p_merit) + \" +/- \" + str(pmerit_err)) resfile(\"Bayesian p-value for", "MaxNLocator try: import cPickle as pickle except ImportError: import pickle import copy import", "out narrow signals # # # class Bayes(object): \"\"\" Bayesian time series analysis", "RECOMMENDED), use the emcee package for running MCMC. If False, use Metropolis-Hastings. parname", "resfile = utils.TwoPrint(resfilename) ### make strings for function names from function definition func1name", "str(i) + ' failed! Returning ...') # return psfit, fakeper, mcobs sim_pars1 =", "with the simpler model. Parameters ---------- func1 : function Parametric model for the", "with func and compute the maximum-a-posteriori (MAP) estimate. Divide the data by the", "the posterior distribution of the likelihood ratio. 
Note that this also sets the", "str(sim_srat)) resfile('observed srat: ' + str(fitpars1['sobs'])) resfile(\"p(LRT) = \" + str(p_lrt)) resfile(\"KSP(obs) =", "psrat_err], 'p_deviance': [p_deviance, pdeviance_err], 'fitpars': fitpars, \"postmean\": mcobs.mean, \"posterr\": mcobs.std, \"postquantiles\": mcobs.ci, \"rhat\":", "str(plrt_err)) if self.plot: n, bins, patches = plt.hist(sim_lrt, bins=100, normed=True, histtype='stepfilled') plt.vlines(obslrt, 0.0,", "at the maximum-a-posteriori paramters. If func1 and func2 differ in complexity, the less", "use Metropolis-Hastings. \"\"\" ## the file name where the output will be stored", "corrected! maxpow_ul = sim_maxpow_sort[ninetyfiveperlim] ### Step 6: Compute errors of Bayesian posterior probabilities", "= \" + str(fitpars1['deviance'])) # print(\"mean(sim_deviance) = \" + str(np.mean(sim_deviance))) print(\"KSP(obs) = \"", "psrat_err], \"postmean\": mcobs.mean, \"posterr\": mcobs.std, \"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance}", "each frequency ### Like everything else, this is n-trial corrected! maxpow_ul = sim_maxpow_sort[ninetyfiveperlim]", "results on screen and make funky plots print(\"Bayesian p-value for deviance D =", "a function that takes an array of frequencies and k parameters, and returns", "bindict['bin' + str(b) + '_ul_%.4fHz' % bc] = brms else: continue ### Step", "# print(\"<< --- len(self.ps beginning): \" + str(len(self.ps.ps))) if self.m == 1: lpost", "format='png') plt.close() summary = {\"p_lrt\": [p_lrt, plrt_err], \"p_deviance\": [p_deviance, pdeviance_err], \"p_ksp\": [p_ksp, pksp_err],", "maximum-a-posteriori paramters. 
If func1 and func2 differ in complexity, the less complex should", "keys = summary.keys() except AttributeError: raise Exception(\"Summary must be a dictionary!\") probs =", "func(fitpars['bindict']['bin' + str(b)].freq, *fitpars['popt']) resfile(\"bintemplate[0]: \" + str(bintemplate[0])) ## then compute upper limits", "Parameters: \\n\") print(\"parameter \\t mean \\t\\t sd \\t\\t 5% \\t\\t 95% \\n\") print(\"---------------------------------------------\\n\")", "Display results on screen and make funky plots print(\"Bayesian p-value for deviance D", "\" + str(pksp_err)) resfile(\"Bayesian p-value for Merit function: \" + str(p_merit) + \"", "and associated quantities fitpars1 = getattr(psfit, func1name + 'fit') fitpars2 = getattr(psfit, func2name", "+ \" is \" + str(x)) ### print posterior summary of parameters: print(\"--", "frequencies and k parameters, and returns an array of model powers. The function", "from src.SpectralAnalysis import posterior ########################################## # # class Bayes: Bayesian data analysis for", "x[:2] == 'p_': probs[x] = summary[x] else: postpars[x] = summary[x] print(\"The ensemble acceptance", "of parameters: file.write(\"-- Posterior Summary of Parameters: \\n\") file.write(\"parameter \\t mean \\t\\t sd", "smoothed] data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"s11maxfreq\"]) + \"Hz with power", "funcfake = mcobs.simulate_periodogram(nsim=nsim) ### empty lists to store simulated LRTS and parameters in", "quantities fitpars1 = getattr(psfit, func1name + 'fit') fitpars2 = getattr(psfit, func2name + 'fit')", "70.0, 100.0, 300.0, 500.0, 1000.0] ## for 40 Hz: print(searchfreq) for bc in", "the T_R statistic is 2I/S = ' + str(bmaxpow_ul)) ### now turn upper", "if self.plot: ### plot the periodogram and best fit models psfit.plotfits(fitpars1, fitpars2, namestr=self.namestr,", "'d : ' + str(fitpars['popt'])) # print('popt3: ' + str(fitpars['popt'])) ### upper limit", "default 5000 Sets the length of the 
Markov chains. For Metropolis-Hastings, this needs", "of residuals: \" + str(probs[x][0]) + \" +/- \" + str( probs[x][1]) +", "p_bmaxpow) / float(len(bmaxpow))) bindict['p_maxpow' + str(b) + 'err'] = bmaxpow_err sim_bmaxpow_sort = np.msort(bmaxpow)", "+ str( probs[x][1]) + \"\\n\") elif x == 'p_maxpow': file.write(\"Bayesian p-value for the", "0.8 * max(n), lw=4, color='m') plt.savefig(self.namestr + '_qpolrt.png', format='png') plt.close() summary = {\"p_lrt\":", "parameters k taken by func1. func2 : function Parametric model for the periodogram.", "30) ax.vlines(pars[plotkeys[i]][0], 0.0, 0.8 * max(n), lw=4) ax.figtext(pars[plotkeys[i]][0] + 0.01 * pars[plotkeys[i]][0], 0.8", "= float(len([x for x in sim_merit if x > fitpars1['merit']])) / float(len(sim_merit)) p_lrt", "sim_srat if x > optpars['sobs']])) / float(len(sim_srat)) print(\"p(LRT) = \" + str(p_lrt)) #", "\" + str(pdeviance_err)) print(\"Bayesian p-value for KS test: \" + str(p_ksp) + \"", "x == 'p_s11max': if \"fitpars\" in probs.keys(): print(\"Highest [11 bin smoothed] data/model outlier", "fakeper, mcobs sim_pars1 = getattr(fitfake, func1name + 'fit') sim_pars2 = getattr(fitfake, func2name +", "* self.ps.n ** 2.0 / (self.ps.df * b * self.ps.nphots ** 2.0) #", "### now turn upper limit into an rms amplitude: ## first compute broadband", "str(psrat_err)) if self.plot: plt.subplot(2, 2, 1) n, bins, patches = plt.hist(sim_maxpow, bins=100, normed=True,", "Returning ...') # return psfit, fakeper, mcobs sim_pars1 = getattr(fitfake, func1name + 'fit')", "file.write(\"parameter \\t mean \\t\\t sd \\t\\t 5% \\t\\t 95% \\n\") file.write(\"---------------------------------------------\\n\") for i", "4: Fit fake periodograms and read out parameters of interest from each fit:", "+ str(probs[x][1])) elif x == 'p_s5max': if \"fitpars\" in probs.keys(): print(\"Highest [5 bin", "\" + str(np.mean(sim_srat))) ### Step 6: Compute errors of Bayesian posterior probabilities pmaxpow_err", "return False, False, False 
else: ### Step 5: Compute Bayesian posterior probabilities of", "= \" + str(np.mean(sim_ksp))) print(\"Merit(obs) = \" + str(optpars['merit'])) print(\"mean(sim_merit) = \" +", "sd \\t\\t 5% \\t\\t 95% \\n\") file.write(\"---------------------------------------------\\n\") for i in range(len(postpars['postmean'])): file.write(\"theta[\" +", "sufficiently with the simpler model. Parameters ---------- func1 : function Parametric model for", "Merit function: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x", "+ '_maxpow.png', format='png') plt.close() results = {\"fitpars\": fitpars, 'bindict': bindict, 'maxpows_all': maxpows_all, 'mcobs':", "powerspectrum.Powerspectrum A periodogram object that is to be searched for QPOs namestr: string,", "Default uses BFGS, which is pretty robust for most purposes. nchain : int,", "func2name + 'fit') if self.plot: ### plot the periodogram and best fit models", "Monte Carlo Simulations ### of model 1: mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost, topt=fitpars['popt'],", "signal power P_max_ul = ' + str(s3max_ul)) resfile(\"Bayesian p-value for maximum power P_max", "turn upper limit into an rms amplitude: ## first compute broadband noise model", "str(brms)) bindict['bin' + str(b) + '_ul_%.4fHz' % bc] = brms else: continue ###", "return results def find_qpo(self, func, ain, fitmethod='constbfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, parname=None, plotstr=None,", "that the data can be explained sufficiently with the simpler model. Parameters ----------", "= posterior.StackPerPosterior(self.ps, func, self.m) ### Step 2: Set up Markov Chain Monte Carlo", "resfilename = self.namestr + \"_choosenoisemodel.dat\" resfile = utils.TwoPrint(resfilename) ### make strings for function", "bindicts = [x[\"bindict\"] for x in sim_pars_all] ### get out binned powers: maxpows_all", "+ str(postpars[\"acor\"])) except KeyError: print(\"Module Acor not found. 
Cannot compute autocorrelation times for", "the MAP fit using func2. The number of elements *must* equal the number", "probs.keys(): print(\"Highest [3 bin smoothed] data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"s3maxfreq\"])", "+ str(bc) + 'Hz for a binning of ' + str(b) + '", "Simulations ### of model 1: mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost, topt=fitpars1['popt'], tcov=fitpars1['cov'], covfactor=covfactor,", "nsim : int, optional, default 1000 The number of simulations to use when", "\"\\n\") elif x == 'p_s5max': file.write(\"Bayesian p-value for the highest [5 bin smoothed]", "resfile('The upper limit on the T_R statistic is 2I/S = ' + str(bmaxpow_ul))", "highest sim_maxpow_sort = np.msort(sim_maxpow) sim_s3max_sort = np.msort(sim_s3max) sim_s5max_sort = np.msort(sim_s5max) sim_s11max_sort = np.msort(sim_s11max)", "x > optpars['merit']])) / float(len(sim_merit)) p_lrt = float(len([x for x in sim_lrt if", "200, 300, 500, 700, 1000] binlist = [r for r in fitpars[\"bindict\"].keys()] nbins", "= \" + str(fitpars1['ksp'])) resfile(\"mean(sim_ksp) = \" + str(np.mean(sim_ksp))) resfile(\"Merit(obs) = \" +", "fewer samples niter : int, optional, default 5000 Sets the length of the", "for Parameter \" + str(i) + \" is \" + str(x)) ### print", "+ \"Hz with power P=\" + str(probs[\"fitpars\"][\"s3max\"])) print(\"Bayesian p-value for the highest [3", "x > fitpars['s5max']])) / float(len(sim_s5max)) p_s11max = float(len([x for x in sim_s11max if", "str(pmerit_err)) print(\"Bayesian p-value for the np.sum of residuals: \" + str(p_srat) + \"", "+/- \" + str(probs[x][1])) elif x == 'p_merit': print(\"Bayesian p-value for Merit function:", "bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s3max']) / 1.2, max(25, fitpars['s3max']", "summary): \"\"\" Print a summary of the results. NOT USED! 
\"\"\" try: keys", "with the same models as the data, and compute the likelihood ratios such", "file.write(\"The $R_hat$ value for Parameter \" + str(i) + \" is \" +", "is rms = ' + str(brms)) bindict['bin' + str(b) + '_ul_%.4fHz' % bc]", "\\t \" + str(postpars['postmean'][i]) + \"\\t\" + str( postpars['posterr'][i]) + \"\\t\" + str(postpars['postquantiles'][i][0])", "they're not already if self.ps.norm == 'leahy': binpowers = binpowers / (self.ps.df *", "two models using likelihood ratio tests - find periodicities by picking out the", "0.0, 0.8 * max(n), lw=4, color='m') plt.savefig(self.namestr + '_qpolrt.png', format='png') plt.close() summary =", "xmin, xmax = min(min(bins), fitpars['maxpow']) / 1.2, max(25, fitpars['maxpow'] * 1.2) plt.axis([xmin, xmax,", "float(len([x for x in sim_srat if x > fitpars['sobs']])) / float(len(sim_srat)) p_s3max =", "ps self.namestr = namestr self.plot = plot self.m = m def choose_noise_model(self, func1,", ": boolean, optional, default True If True (STRONGLY RECOMMENDED), use the emcee package", "Bayesian posterior probabilities pmaxpow_err = np.sqrt(p_maxpow * (1.0 - p_maxpow) / float(len(sim_ksp))) pdeviance_err", "bmaxpow_ul * bintemplate / 2.0 - bintemplate ## now compute rms amplitude at", "simulations to compute five percent limit reliably!') fiveperlim = 1 ninetyfiveperlim = len(sim_maxpow)", "now compute rms amplitude at 40, 70, 100 and 300 Hz ## first,", "slrt, soptpars, sqpopars = sim_psfit.find_qpo(func, ain, obs=False, plot=True, plotname=plotstr + '_sim' + str(simno)", "be a function that takes an array of frequencies and n parameters, and", "resfile('The upper limit on the power at ' + str(bc) + 'Hz for", "Compute errors of Bayesian posterior probabilities pmaxpow_err = np.sqrt(p_maxpow * (1.0 - p_maxpow)", "str(fitpars1['sobs'])) resfile(\"mean(sim_srat) = \" + str(np.mean(sim_srat))) ### Step 6: Compute errors of Bayesian", "and compute the likelihood ratios such that it is possible to build up", 
"psfit.plotfits(fitpars1, fitpars2, namestr=self.namestr, log=True) if self.m == 1: lpost = posterior.PerPosterior(self.ps, func1) else:", "to observation and compute LRT psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) obslrt = psfit.compute_lrt(func1,", "str(p_bmaxpow) + ' +/- ' + str(bmaxpow_err)) resfile('The corresponding value of the T_R", "search for QPOs via a model selection approach using LRTs Parameters ---------- ps", "the power in the sorted array where p_maxpow would be 0.05 ### i.e.", "summary[x] else: postpars[x] = summary[x] picklefile = open(namestr + \"_summary_pickle.dat\", \"w\") pickle.dump(summary, picklefile)", "degrees of freedom. Find the highest power in the residuals and its frequency.", "sure to use the right distribution Attributes ---------- Examples -------- \"\"\" def __init__(self,", "0: resfile(\"Analysis of Burst failed! Returning ...\") return False, False, False else: ###", "and make funky plots resfile(\"Bayesian p-value for maximum power P_max = \" +", "picking out narrow signals # # # class Bayes(object): \"\"\" Bayesian time series", "First, fit the periodogram with func and compute the maximum-a-posteriori (MAP) estimate. 
Divide", "patches = plt.hist(sim_lrt, bins=100, normed=True, histtype='stepfilled') plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4, color='m')", "str(p_ksp) + \" +/- \" + str(pksp_err)) resfile(\"Bayesian p-value for Merit function: \"", "(~500) and fewer samples niter : int, optional, default 5000 Sets the length", "where the output will be stored resfilename = self.namestr + \"_findperiodicity_results.dat\" ## open", "= fitfake.mlest(func, sain, obs=False, noise=noise, m=self.m) # print('popt' + str(i) + 'c :", "1, 2, i) n, bins, patches = ax.hist(pars[plotkeys[i]][0], 30) ax.vlines(pars[plotkeys[i]][0], 0.0, 0.8 *", "of residuals: \" + str(p_srat) + \" +/- \" + str(psrat_err)) resfile(\"Bayesian p-value", "P=\" + str(probs[\"fitpars\"][\"maxpow\"])) print(\"Bayesian p-value for the highest [unsmoothed] data/model outlier: \" +", "m=self.m) ### Step 3: create fake periodograms out of MCMCs fakeper = mcobs.simulate_periodogram(nsim=nsim)", "ensemble acceptance rate is \" + str(postpars[\"acceptance\"]) + \" .\\n\") try: file.write(\"The autocorrelation", "= summary[x] else: postpars[x] = summary[x] picklefile = open(namestr + \"_summary_pickle.dat\", \"w\") pickle.dump(summary,", "If the periodogram used is the result of averaging several individual periodograms (or", "namestr=self.namestr, log=True) if self.m == 1: lpost = posterior.PerPosterior(self.ps, func1) else: lpost =", "is the power in the sorted array where p_maxpow would be 0.05 ###", "'p_lrt': file.write( \"Bayesian p-value for Likelihood Ratio: \" + str(probs[x][0]) + \" +/-", "plot_posteriors(namestr='test', **pars): plotkeys = pars.keys() N = len(plotkeys) ### number of parameters fig", "of model powers The function should include a parameter setting a constant background", "plt.hist(sim_lrt, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4, color='navy') plt.savefig(self.namestr", "maximum signal power P_max_ul = ' + str(s11max_ul)) 
resfile(\"Bayesian p-value for deviance D", "and posterior distribution of parameters for x in keys: if x[:2] == 'p_':", "try: keys = summary.keys() except AttributeError: raise Exception(\"Summary must be a dictionary!\") probs", "model to *every* frequency. NOTE: I rarely ever use this because it's really", "\"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return results def find_qpo(self, func,", "= \" + str(fitpars1['merit'])) resfile(\"mean(sim_merit) = \" + str(np.mean(sim_merit))) resfile(\"Srat(obs) = \" +", "= utils.TwoPrint(resfilename) ### step 1: fit model to observation psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod,", "frequencies bintemplate = func(fitpars['bindict']['bin' + str(b)].freq, *fitpars['popt']) resfile(\"bintemplate[0]: \" + str(bintemplate[0])) ## then", "autocorrelation times for the parameters\") for i, x in enumerate(postpars[\"rhat\"]): print(\"The $R_hat$ value", "probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_s5max': if \"fitpars\"", "NOTE: I rarely ever use this because it's really computationally expensive. Parameters ----------", "stored resfilename = self.namestr + \"_findperiodicity_results.dat\" ## open the output log file resfile", "max(n)]) plt.vlines(fitpars['s11max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (11) data', fontsize=12) plt.savefig(self.namestr + '_maxpow.png',", "test: \" + str(p_ksp) + \" +/- \" + str(pksp_err)) print(\"Bayesian p-value for", "DO: Need to add smoothing for picking out narrow signals # # #", "and compute a posterior predictive p-value of seeing the maximum power in the", "P_max = \" + str(p_s3max) + \" +/- \" + str(ps3max_err)) # resfile('Upper", "range(N): ax = fig.add_subplot(N / 2 + 1, 2, i) n, bins, patches", "### simulate lots of realizations of the broadband noise model from MCMCs funcfake", "frequency ### Like everything else, this is n-trial corrected! 
maxpow_ul = sim_maxpow_sort[ninetyfiveperlim] ###", "* (1.0 - p_lrt) / float(len(sim_ksp))) psrat_err = np.sqrt(p_srat * (1.0 - p_srat)", "\" + str(plrt_err)) if self.plot: n, bins, patches = plt.hist(sim_lrt, bins=100, normed=True, color=\"cyan\",", "in searchfreq: if bc > (binps.freq[1] - binps.freq[0]): bind = np.searchsorted(binps.freq, bc) -", "x > fitpars1['maxpow']])) / float(len(sim_maxpow)) p_deviance = float(len([x for x in sim_deviance if", "power P_max = \" + str(p_s5max) + \" +/- \" + str(ps5max_err)) #", "= p_bmaxpow bmaxpow_err = np.sqrt(p_bmaxpow * (1.0 - p_bmaxpow) / float(len(bmaxpow))) bindict['p_maxpow' +", "fontsize=12) plt.subplot(2, 2, 3) n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled')", "sqpopars = sim_psfit.find_qpo(func, ain, obs=False, plot=True, plotname=plotstr + '_sim' + str(simno) + '_qposearch')", "to build up a posterior distribution for the likelihood ratios and compute a", "plots print(\"Bayesian p-value for deviance D = \" + str(p_deviance) + \" +/-", "[], [], [], [], [], [], [], [], [], [] bmax = int(self.ps.freq[-1]", "can be constrained only to 0.001). 
covfactor : float, optional, default 1.0 A", "obslrt, optpars, qpopars = psfit.find_qpo(func, ain, plot=True, obs=True, plotname=self.namestr + '_loglikes') ### simulate", "empty lists to store simulated LRTS and parameters in sim_lrt, sim_optpars, sim_qpopars, sim_deviance,", "float(len([x for x in sim_srat if x > optpars['sobs']])) / float(len(sim_srat)) print(\"p(LRT) =", "beginning): \" + str(len(self.ps.ps))) ### step 1: fit model to observation psfit =", "+ str(np.mean(sim_merit))) resfile(\"Srat(obs) = \" + str(fitpars1['sobs'])) resfile(\"mean(sim_srat) = \" + str(np.mean(sim_srat))) ###", "KS test: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1]) + \"\\n\")", "namestr=self.namestr, use_emcee=use_emcee, plot=self.plot, printobj=resfile, m=self.m) ### Step 3: create fake periodograms out of", "complex should be func1. Then sample the posterior distribution for the the simpler", "str(fitpars1['ksp'])) resfile(\"mean(sim_ksp) = \" + str(np.mean(sim_ksp))) resfile(\"Merit(obs) = \" + str(fitpars1['merit'])) resfile(\"mean(sim_merit) =", "[unsmoothed] data/model outlier: \" + str( probs[x][0]) + \" +/- \" + str(probs[x][1]))", "### Step 3: create fake periodograms out of MCMCs fakeper = mcobs.simulate_periodogram(nsim=nsim) sim_pars_all,", "\" + str(ps3max_err)) # resfile('Upper limit on maximum signal power P_max_ul = '", "in sim_merit if x > optpars['merit']])) / float(len(sim_merit)) p_lrt = float(len([x for x", "/ float(len(sim_lrt)) p_srat = float(len([x for x in sim_srat if x > optpars['sobs']]))", "1.2, max(25, fitpars['maxpow'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['maxpow'], 0.0, max(n), lw=2,", "predictive p-value (for 1000 simulations, the p-value can be constrained only to 0.001).", "= \" + str(p_deviance) + \" +/- \" + str(pdeviance_err)) print(\"Bayesian p-value for", "applications. 
nchain : int, optional, default 10 The number of chains or walkers", "+ \"\\n\") file.write( \"Upper limit for highest [unsmoothed] data/model outlier: \" + str(summary['maxpow_ul'])", "best fit models psfit.plotfits(fitpars1, fitpars2, namestr=self.namestr, log=True) if self.m == 1: lpost =", "str(fitpars['popt'])) bindicts = [x[\"bindict\"] for x in sim_pars_all] ### get out binned powers:", "= m def choose_noise_model(self, func1, par1, func2, par2, fitmethod='bfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0,", "\" + str(p_s3max) + \" +/- \" + str(ps3max_err)) # resfile('Upper limit on", "100, 200, 300, 500, 700, 1000] binlist = [r for r in fitpars[\"bindict\"].keys()]", "== 'p_s5max': file.write(\"Bayesian p-value for the highest [5 bin smoothed] data/model outlier: \"", "float(len([x for x in sim_deviance if x > fitpars1['deviance']])) / float(len(sim_deviance)) p_ksp =", "fit model to observation psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) fitpars = psfit.mlest(func, ain,", "# print('popt' + str(i) + 'd : ' + str(fitpars['popt'])) # print('popt3: '", "outlier: \" + str(summary['maxpow_ul']) + \"\\n\") elif x == 'p_s3max': file.write(\"Bayesian p-value for", "the results. NOT USED! 
\"\"\" try: keys = summary.keys() except AttributeError: raise Exception(\"Summary", "be of type powerspectrum.PowerSpectrum!\" self.ps = ps self.namestr = namestr self.plot = plot", "str(i) + \" is \" + str(x) + \"\\n\") ### print posterior summary", "fitpars['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for x in sim_merit if x >", "autocorrelation times are: \" + str(postpars[\"acor\"]) + \"\\n\") except KeyError: file.write(\"Module Acor not", "' + str(fitpars['popt'])) ### Step 4: Fit fake periodograms: for i, x in", "### step 1: fit model to observation psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) fitpars", "posterior distribution of parameters for x in keys: if x[:2] == 'p_': probs[x]", "Ratio: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x ==", "in enumerate(postpars[\"rhat\"]): file.write(\"The $R_hat$ value for Parameter \" + str(i) + \" is", "'p_srat': file.write(\"Bayesian p-value for the sum of residuals: \" + str(probs[x][0]) + \"", "= open(namestr + \"_summary_pickle.dat\", \"w\") pickle.dump(summary, picklefile) picklefile.close() file = open(namestr + \"_summary.dat\",", "for Parameter \" + str(i) + \" is \" + str(x) + \"\\n\")", "sim_s5max, sim_s11max = [], [], [], [], [], [], [], [], [], [],", "in sim_deviance if x > fitpars['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x for x", "for Merit function: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif", "color=\"cyan\", histtype='stepfilled') plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4, color='navy') plt.savefig(self.namestr + '_lrt.png', format='png')", "amplitude: ## first compute broadband noise model for binned frequencies bintemplate = func(fitpars['bindict']['bin'", "str(p_merit) + \" +/- \" + str(pmerit_err)) resfile(\"Bayesian p-value for the np.sum of", "Step 6: Compute errors of Bayesian posterior probabilities pdeviance_err = np.sqrt(p_deviance * (1.0", "+ 'fit') fitpars2 = getattr(psfit, func2name + 'fit') if self.plot: ### 
plot the", "covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=use_emcee, plot=self.plot, printobj=resfile, m=self.m) ### Step 3:", "+ '_qposearch') sim_lrt.append(slrt) sim_optpars.append(soptpars) sim_qpopars.append(sqpopars) sim_deviance.append(soptpars['deviance']) sim_ksp.append(soptpars['ksp']) sim_merit.append(soptpars['merit']) sim_srat.append(soptpars['sobs']) except KeyboardInterrupt: break ###", "and func2, compute the likelihood ratio at the maximum-a-posteriori paramters. If func1 and", "## first compute broadband noise model for binned frequencies bintemplate = func(fitpars['bindict']['bin' +", "bin smoothed] data/model outlier: \" + str( probs[x][0]) + \" +/- \" +", "sim_pars = fitfake.mlest(func, sain, obs=False, noise=noise, m=self.m) # print('popt' + str(i) + 'c", "self.namestr funcname = str(func).split()[1] # print(\"<< --- len(self.ps beginning): \" + str(len(self.ps.ps))) ###", "+ \"Hz with power P=\" + str(probs[\"fitpars\"][\"s11max\"])) print(\"Bayesian p-value for the highest [11", "by func2. fitmethod : string, optional, default bfgs Allows the choice of different", "p_ksp = float(len([x for x in sim_ksp if x > fitpars1['ksp']])) / float(len(sim_ksp))", "+ str( probs[\"fitpars\"][\"maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"maxpow\"])) print(\"Bayesian p-value for", "+/- \" + str(probs[x][1])) return def write_summary(self, summary, namestr=None): \"\"\" Write a summary", "### Display results on screen and make funky plots resfile(\"Bayesian p-value for maximum", "/ float(len(sim_ksp))) pksp_err = np.sqrt(p_ksp * (1.0 - p_ksp) / float(len(sim_ksp))) pmerit_err =", "the sum of residuals: \" + str(probs[x][0]) + \" +/- \" + str(", "sim_deviance if x > fitpars['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x for x in", "namestr=None): \"\"\" Write a summary of the analysis to file. NOT USED! 
:param", "mcobs.acor, \"acceptance\": mcobs.acceptance} return psfit, fakeper, summary def find_periodicity(self, func, par, fitmethod='bfgs', nchain=10,", "x in probs.keys(): if x == 'p_lrt': file.write( \"Bayesian p-value for Likelihood Ratio:", "[3 bin smoothed] data/model outlier: \" + str( probs[x][0]) + \" +/- \"", "plt.figure(figsize=(2, N / 2 + 1)) plt.subplots_adjust(top=0.95, bottom=0.05, left=0.05, right=0.95, wspace=0.2, hspace=0.2) for", "limit is the power in the sorted array where p_maxpow would be 0.05", "\" +/- \" + str(plrt_err)) if self.plot: n, bins, patches = plt.hist(sim_lrt, bins=100,", "model (func1), pick parameter sets from the posterior to create fake periodograms. Fit", "+ str(probs[x][1]) + \"\\n\") elif x == 'p_deviance': file.write(\"Bayesian p-value for deviance D", "= summary[x] picklefile = open(namestr + \"_summary_pickle.dat\", \"w\") pickle.dump(summary, picklefile) picklefile.close() file =", "+ str(np.mean(sim_ksp))) resfile(\"Merit(obs) = \" + str(fitpars1['merit'])) resfile(\"mean(sim_merit) = \" + str(np.mean(sim_merit))) resfile(\"Srat(obs)", "BFGS, which is pretty robust for most purposes. nchain : int, optional, default", "500, 700, 1000] binlist = [r for r in fitpars[\"bindict\"].keys()] nbins = len(binlist)", "= summary[x] else: postpars[x] = summary[x] print(\"The ensemble acceptance rate is \" +", "\" + str(probs[x][1])) elif x == 'p_merit': print(\"Bayesian p-value for Merit function: \"", "in sim_s5max if x > fitpars['s5max']])) / float(len(sim_s5max)) p_s11max = float(len([x for x", "for x in sim_pars_all] ### get out binned powers: maxpows_all = {} binprob", "= [], [], [], [], [], [], [] simno = 0 ### run", ": {list, array-like} Input guesses for the parameters taken by func. The number", "of the likelihood ratio. 
Note that this also sets the maximum precision of", "read out parameters of interest from each fit: for i, x in enumerate(fakeper):", "fitfake.plotfits(sim_pars1, sim_pars2, namestr=self.namestr+'_'+str(i)) sim_lrt.append(lrt) sim_deviance.append(sim_pars1['deviance']) sim_ksp.append(sim_pars1['ksp']) sim_maxpow.append(sim_pars1['maxpow']) sim_merit.append(sim_pars1['merit']) sim_fpeak.append(sim_pars1['maxfreq']) sim_y0.append(sim_pars1['mfit'][sim_pars1['maxind']]) sim_srat.append(sim_pars1['sobs']) except KeyboardInterrupt:", "= [], [], [], [], [], [], [], [], [], [], [] bmax", "\\n\") for i, x in enumerate(postpars[\"rhat\"]): file.write(\"The $R_hat$ value for Parameter \" +", "and 300 Hz ## first, convert powers into rms normalization, if they're not", "self.m == 1: lpost = posterior.PerPosterior(self.ps, func) else: lpost = posterior.StackPerPosterior(self.ps, func, self.m)", "QPOs via a model selection approach using LRTs # # # TO DO:", "try: fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False) lrt = fitfake.compute_lrt(func1, par1, func2, par2, noise1=noise1,", "if x > fitpars['s11max']])) / float(len(sim_s11max)) ### sort maximum powers from lowest to", "fake periodogram, find the MAP estimate, divide out the MAP model and find", "= np.sqrt(p_bmaxpow * (1.0 - p_bmaxpow) / float(len(bmaxpow))) bindict['p_maxpow' + str(b) + 'err']", "data can be explained sufficiently with the simpler model. Parameters ---------- func1 :", "for binned frequencies bintemplate = func(fitpars['bindict']['bin' + str(b)].freq, *fitpars['popt']) resfile(\"bintemplate[0]: \" + str(bintemplate[0]))", "for x in sim_srat if x > optpars['sobs']])) / float(len(sim_srat)) print(\"p(LRT) = \"", "constant background level, and this parameter should be last! 
par1 : {list, array-like}", "function: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x ==", "= ' + str(s11max_ul)) resfile(\"Bayesian p-value for deviance D = \" + str(p_deviance)", "+ str(b)])) resfile('The upper limit on the T_R statistic is 2I/S = '", "= [40.0, 70.0, 100.0, 300.0, 500.0, 1000.0] ## for 40 Hz: print(searchfreq) for", "40 Hz: print(searchfreq) for bc in searchfreq: if bc > (binps.freq[1] - binps.freq[0]):", "func1 and func2. In the pre-defined models, this index is *always* -1. \"\"\"", "\"\\t\" + str( postpars[\"postquantiles\"][i][1]) + \"\\n\") for x in probs.keys(): if x ==", "of chains or walkers to use in MCMC. For Metropolis-Hastings, use ~10-20 and", "\" +/- \" + str(probs[x][1]) + \"\\n\") elif x == 'p_ksp': file.write(\"Bayesian p-value", "= 1 ninetyfiveperlim = len(sim_maxpow) - fiveperlim # print('popt4: ' + str(fitpars['popt'])) bindicts", "use as many as you can afford (~500) and fewer samples niter :", "powers for each frequency ### Like everything else, this is n-trial corrected! maxpow_ul", "= mcobs.simulate_periodogram(nsim=nsim) sim_pars_all, sim_deviance, sim_ksp, sim_fpeak, sim_srat, \\ sim_maxpow, sim_merit, sim_y0, sim_s3max, sim_s5max,", "posterior probabilities of individual quantities p_maxpow = float(len([x for x in sim_maxpow if", "package for running MCMC. If False, use Metropolis-Hastings. 
\"\"\" if plotstr == None:", "of residuals: \" + str(p_srat) + \" +/- \" + str(psrat_err)) print(\"Bayesian p-value", "file.write(\"Bayesian p-value for the highest [unsmoothed] data/model outlier: \" + str( probs[x][0]) +", "\" +/- \" + str(pmaxpow_err)) # resfile('Upper limit on maximum signal power P_max_ul", "pick between two models using likelihood ratio tests - find periodicities by picking", "if x > fitpars['merit']])) / float(len(sim_merit)) p_srat = float(len([x for x in sim_srat", "func, self.m) ### Step 2: Set up Markov Chain Monte Carlo Simulations ###", "= ' + str(fitpars['bindict'][\"bmax\" + str(b)])) resfile('The upper limit on the T_R statistic", "# # TO DO: Need to add smoothing for picking out narrow signals", "2, 2) n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax", "else: ### Step 5: Compute Bayesian posterior probabilities of individual quantities p_maxpow =", "float(len(sim_ksp))) ps3max_err = np.sqrt(p_s3max * (1.0 - p_s3max) / float(len(sim_ksp))) ps5max_err = np.sqrt(p_s5max", "\"\\t\" + str( postpars['posterr'][i]) + \"\\t\" + str(postpars['postquantiles'][i][0]) + \"\\t\" + str( postpars[\"postquantiles\"][i][1])", "mcobs sim_pars1 = getattr(fitfake, func1name + 'fit') sim_pars2 = getattr(fitfake, func2name + 'fit')", "float(len([x for x in sim_ksp if x > fitpars['ksp']])) / float(len(sim_ksp)) p_merit =", "by func1. func2 : function Parametric model for the periodogram. 
Needs to be", "import posterior ########################################## # # class Bayes: Bayesian data analysis for time series", "str( probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_s11max': if", "= plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s11max']) / 1.2,", "\" + str(np.mean(sim_ksp))) resfile(\"Merit(obs) = \" + str(fitpars1['merit'])) resfile(\"mean(sim_merit) = \" + str(np.mean(sim_merit)))", "models using likelihood ratio tests - find periodicities by picking out the largest", "\"Bayesian p-value for Merit function: \" + str(probs[x][0]) + \" +/- \" +", "at 40, 70, 100 and 300 Hz ## first, convert powers into rms", "series # # This class defines a Bayes object that can: # -", "Input guesses for the MAP fit using func2. The number of elements *must*", "the T_R statistic at frequency f = ' + str( fitpars[\"bindict\"][\"bmaxfreq\" + str(b)])", "a constant background level, and this parameter should be last! par1 : {list,", "fiveperlim = int(0.05 * len(sim_maxpow)) if fiveperlim == 0: resfile('Warning! 
Too few simulations", "= plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s3max']) / 1.2,", "right distribution Attributes ---------- Examples -------- \"\"\" def __init__(self, ps, namestr='test', plot=True, m=1):", "sim_ksp.append(soptpars['ksp']) sim_merit.append(soptpars['merit']) sim_srat.append(soptpars['sobs']) except KeyboardInterrupt: break ### Step 5: Compute Bayesian posterior probabilities", "limits for powers I_j depending on frequency binpowers = bmaxpow_ul * bintemplate /", "Compute Bayesian posterior probabilities of individual quantities p_maxpow = float(len([x for x in", "= psfit.mlest(func, par, obs=True, noise=noise, m=self.m) bindict = fitpars['bindict'] # print('popt: ' +", "self.plot: n, bins, patches = plt.hist(sim_lrt, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') plt.vlines(obslrt, 0.0, 0.8", "x > fitpars['bindict'][\"bmax\" + str(b)]])) / float( len(bmaxpow)) bindict[\"p_maxpow\" + str(b)] = p_bmaxpow", "is p = ' + str(p_bmaxpow) + ' +/- ' + str(bmaxpow_err)) resfile('The", "self.ps.nphots) elif self.ps.norm == 'variance': binpowers = binpowers * self.ps.n ** 2.0 /", "used is the result of averaging several individual periodograms (or bins), this changes", "'_sim' + str(simno) + '_qposearch') sim_lrt.append(slrt) sim_optpars.append(soptpars) sim_qpopars.append(sqpopars) sim_deviance.append(soptpars['deviance']) sim_ksp.append(soptpars['ksp']) sim_merit.append(soptpars['merit']) sim_srat.append(soptpars['sobs']) except", "be a dictionary!\") probs = dict() postpars = dict() ### sort out p-values", "np.sqrt(p_maxpow * (1.0 - p_maxpow) / float(len(sim_ksp))) pdeviance_err = np.sqrt(p_deviance * (1.0 -", "+ str(i) + \" is \" + str(x)) ### print posterior summary of", "bin smoothed] data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"s11maxfreq\"]) + \"Hz with", "results = {\"fitpars\": fitpars, 'bindict': bindict, 'maxpows_all': maxpows_all, 
'mcobs': mcobs, 'p_maxpow': [sim_maxpow, p_maxpow,", "USED! :param summary: :param namestr: :return: \"\"\" if not namestr: namestr = self.namestr", "probs.keys(): if x == 'p_lrt': file.write( \"Bayesian p-value for Likelihood Ratio: \" +", "str( probs[\"fitpars\"][\"s5maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"s5max\"])) print(\"Bayesian p-value for the", "in MCMC. For Metropolis-Hastings, use ~10-20 and many samples For emcee, use as", "= \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1]) + \"\\n\") elif", "range(len(postpars['postmean'])): file.write(\"theta[\" + str(i) + \"] \\t \" + str(postpars['postmean'][i]) + \"\\t\" +", "binning of ' + str(b) + ' is rms = ' + str(brms))", "self.m == 1: lpost = posterior.PerPosterior(self.ps, func1) else: lpost = posterior.StackPerPosterior(self.ps, func1, self.m)", "# print('len(binpowers): ' + str(len(binpowers))) if searchfreq is None: searchfreq = [40.0, 70.0,", "the simpler model (func1), pick parameter sets from the posterior to create fake", "pmerit_err], 'p_srat': [p_srat, psrat_err], 'p_deviance': [p_deviance, pdeviance_err], 'fitpars': fitpars, \"postmean\": mcobs.mean, \"posterr\": mcobs.std,", "for function names from function definition func1name = \"model1\" func2name = \"model2\" ###", "resfile('Fitting of fake periodogram ' + str(i) + ' failed! Returning ...') #", "pickle except ImportError: import pickle import copy import numpy as np from src.SpectralAnalysis", "summary def print_summary(self, summary): \"\"\" Print a summary of the results. NOT USED!", "The number of elements in this list or array must match the number", "sim_s11max.append(sim_pars['s11max']) except KeyboardInterrupt: break # except: # print(\"Simulation failed! 
Continuing ...\") # continue", ": ' + str(fitpars['popt'])) fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False) # print('popt' + str(i)", "+ str(sim_srat)) resfile('observed srat: ' + str(fitpars1['sobs'])) resfile(\"p(LRT) = \" + str(p_lrt)) resfile(\"KSP(obs)", "[], [], [], [] bmax = int(self.ps.freq[-1] / (2.0 * (self.ps.freq[1] - self.ps.freq[0])))", "if self.plot: n, bins, patches = plt.hist(sim_lrt, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') plt.vlines(obslrt, 0.0,", "str(b) + '_ul_%.4fHz' % bc] = brms else: continue ### Step 5: Compute", "bmaxpow p_bmaxpow = float(len([x for x in bmaxpow if x > fitpars['bindict'][\"bmax\" +", "will be stored resfilename = self.namestr + \"_findperiodicity_results.dat\" ## open the output log", "color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s11max']) / 1.2, max(25, fitpars['s3max'] * 1.2)", "to 0.001). covfactor : float, optional, default 1.0 A tuning parameter for the", "pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance) / float(len(sim_ksp))) pksp_err = np.sqrt(p_ksp *", "P_max_ul = ' + str(s3max_ul)) resfile(\"Bayesian p-value for maximum power P_max = \"", "p_bmaxpow = float(len([x for x in bmaxpow if x > fitpars['bindict'][\"bmax\" + str(b)]]))", "str(b)] bmaxpow = np.array([x[\"bmax\" + str(b)] for x in bindicts]) maxpows_all[\"bin\" + str(b)]", "compute broadband noise model for binned frequencies bintemplate = func(fitpars['bindict']['bin' + str(b)].freq, *fitpars['popt'])", "sim_s3max if x > fitpars['s3max']])) / float(len(sim_s3max)) p_s5max = float(len([x for x in", "plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s11max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (11) data', fontsize=12)", "0.05*nsim simulations are higher than this ### note: sometimes simulations fail, therefore the", "Metropolis-Hastings, use ~10-20 and many samples For emcee, use as many as you", "power P_max = \" + str(p_maxpow) + \" +/- \" + str(pmaxpow_err)) 
#", "posterior probabilities pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance) / float(len(sim_ksp))) pksp_err =", "\\t mean \\t\\t sd \\t\\t 5% \\t\\t 95% \\n\") file.write(\"---------------------------------------------\\n\") for i in", "and this parameter should be last! par : {list, array-like} Input guesses for", "(STRONGLY RECOMMENDED), use the emcee package for running MCMC. If False, use Metropolis-Hastings.", "first compute broadband noise model for binned frequencies bintemplate = func(fitpars['bindict']['bin' + str(b)].freq,", "\"\"\" Find QPOs by fitting a QPO + background model to *every* frequency.", "Examples -------- \"\"\" def __init__(self, ps, namestr='test', plot=True, m=1): assert isinstance(ps, powerspectrum.PowerSpectrum), \"ps", "periodicities by picking out the largest power in # an observation/set of fake", "fit models psfit.plotfits(fitpars1, fitpars2, namestr=self.namestr, log=True) if self.m == 1: lpost = posterior.PerPosterior(self.ps,", "+/- \" + str(probs[x][1])) elif x == 'p_ksp': print(\"Bayesian p-value for KS test:", "+/- \" + str(probs[x][1])) elif x == 'p_maxpow': if \"fitpars\" in probs.keys(): print(\"Highest", "[], [], [] bmax = int(self.ps.freq[-1] / (2.0 * (self.ps.freq[1] - self.ps.freq[0]))) bins", "parameter in func. In the pre-defined models, this index is *always* -1. use_emcee", "noise2=noise2, m=self.m) # resfile('Fitting of fake periodogram ' + str(i) + ' failed!", "'p_srat': [p_srat, psrat_err], 'p_deviance': [p_deviance, pdeviance_err], 'fitpars': fitpars, \"postmean\": mcobs.mean, \"posterr\": mcobs.std, \"postquantiles\":", "str(np.mean(sim_srat))) ### Step 6: Compute errors of Bayesian posterior probabilities pdeviance_err = np.sqrt(p_deviance", "(1.0 - p_s5max) / float(len(sim_ksp))) ps11max_err = np.sqrt(p_s11max * (1.0 - p_s11max) /", "fig.add_subplot(N / 2 + 1, 2, i) n, bins, patches = ax.hist(pars[plotkeys[i]][0], 30)", "of frequencies and k parameters, and returns an array of model powers. 
The", "and create fake periodograms from samples of the posterior. For each fake periodogram,", "models func1 and func2, compute the likelihood ratio at the maximum-a-posteriori paramters. If", "fit model to observation psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) fitpars = psfit.mlest(func, par,", "p-value for KS test: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1]))", "try: import cPickle as pickle except ImportError: import pickle import copy import numpy", "powerspectrum from src.SpectralAnalysis import mcmc from src.SpectralAnalysis import mle from src.SpectralAnalysis import posterior", "m=self.m) ### find optimum QPO values for the real data obslrt, optpars, qpopars", "or array must match the number of parameters k taken by func. fitmethod", "Input guesses for the parameters taken by func. The number of elements in", "str(b) + ' is P = ' + str(bpow * (self.ps.df * b", "float(len([x for x in sim_s11max if x > fitpars['s11max']])) / float(len(sim_s11max)) ### sort", "print(\"Highest [3 bin smoothed] data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"s3maxfreq\"]) +", "== 1: lpost = posterior.PerPosterior(self.ps, func) else: lpost = posterior.StackPerPosterior(self.ps, func, self.m) ###", "(1.0 - p_bmaxpow) / float(len(bmaxpow))) bindict['p_maxpow' + str(b) + 'err'] = bmaxpow_err sim_bmaxpow_sort", "T_R statistic is 2I/S = ' + str(bmaxpow_ul)) ### now turn upper limit", "str(probs[\"fitpars\"][\"s11max\"])) print(\"Bayesian p-value for the highest [11 bin smoothed] data/model outlier: \" +", "'p_srat': print(\"Bayesian p-value for the sum of residuals: \" + str(probs[x][0]) + \"", "= psfit.find_qpo(func, ain, plot=True, obs=True, plotname=self.namestr + '_loglikes') ### simulate lots of realizations", "+/- \" + str(pdeviance_err)) resfile(\"Bayesian p-value for KS test: \" + str(p_ksp) +", "+ str(probs[x][1]) + \"\\n\") elif x == 'p_srat': file.write(\"Bayesian p-value for the sum", "\"posterr\": mcobs.std, 
\"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return summary def", "== 'p_': probs[x] = summary[x] else: postpars[x] = summary[x] picklefile = open(namestr +", "parameter setting a constant background level, and this parameter should be last! par1", "/ float(len(sim_maxpow)) p_deviance = float(len([x for x in sim_deviance if x > fitpars1['deviance']]))", "\" + str(p_merit) + \" +/- \" + str(pmerit_err)) print(\"Bayesian p-value for the", "+ \" +/- \" + str(plrt_err)) if self.plot: n, bins, patches = plt.hist(sim_lrt,", "plt.savefig(self.namestr + '_lrt.png', format='png') plt.close() summary = {\"p_lrt\": [p_lrt, plrt_err], \"p_maxpow\": [p_maxpow, pmaxpow_err],", "\"\"\" if plotstr == None: plotstr = self.namestr funcname = str(func).split()[1] # print(\"<<", "the null hypothesis (no QPO). Parameters ---------- func : function Parametric model for", "str(np.mean(sim_lrt))) # print(\"Deviance(obs) = \" + str(fitpars1['deviance'])) # print(\"mean(sim_deviance) = \" + str(np.mean(sim_deviance)))", "# - pick between two models using likelihood ratio tests # - find", "P_max = \" + str(p_maxpow) + \" +/- \" + str(pmaxpow_err)) # resfile('Upper", "color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s3max']) / 1.2, max(25, fitpars['s3max'] * 1.2)", "sim_fpeak, sim_y0, sim_srat = [], [], [], [], [], [], [], [] ###", "in range(N): ax = fig.add_subplot(N / 2 + 1, 2, i) n, bins,", "bins, patches = plt.hist(sim_maxpow, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['maxpow'])", "Find QPOs by fitting a QPO + background model to *every* frequency. 
NOTE:", "/ 4 sain = copy.copy(fitpars['popt']) # print('popt2: ' + str(fitpars['popt'])) ### Step 4:", "by picking out the largest power in # an observation/set of fake periodograms", "list, optional, default None Include a list of strings here to set parameter", "' + str(s11max_ul)) resfile(\"Bayesian p-value for deviance D = \" + str(p_deviance) +", "+/- \" + str(plrt_err)) if self.plot: n, bins, patches = plt.hist(sim_lrt, bins=100, normed=True,", "found. Cannot compute autocorrelation times for the parameters\") for i, x in enumerate(postpars[\"rhat\"]):", "this parameter should be last! par : {list, array-like} Input guesses for the", "number of elements *must* equal the number of parameters n taken by func2.", "print(\"Merit(obs) = \" + str(optpars['merit'])) print(\"mean(sim_merit) = \" + str(np.mean(sim_merit))) print(\"Srat(obs) = \"", "A tuning parameter for the MCMC step. Used only in Metropolis-Hastings. use_emcee :", "posterior to create fake periodograms. Fit each fake periodogram with the same models", "*every* frequency. NOTE: I rarely ever use this because it's really computationally expensive.", "that periodogram. Create a posterior distribution of maximum powers and compute a posterior", "' + str(fitpars['popt'])) fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False) # print('popt' + str(i) +", "compute five percent limit reliably!') fiveperlim = 1 ninetyfiveperlim = len(sim_maxpow) - fiveperlim", "lpost, topt=fitpars['popt'], tcov=fitpars['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=True, plot=self.plot, m=self.m) ###", "= plt.figure(figsize=(2, N / 2 + 1)) plt.subplots_adjust(top=0.95, bottom=0.05, left=0.05, right=0.95, wspace=0.2, hspace=0.2)", "from the posterior to create fake periodograms. 
Fit each fake periodogram with the", "== 'p_s11max': if \"fitpars\" in probs.keys(): print(\"Highest [11 bin smoothed] data/model outlier at", "number of elements *must* equal the number of parameters k taken by func1.", "for bc in searchfreq: if bc > (binps.freq[1] - binps.freq[0]): bind = np.searchsorted(binps.freq,", "ps5max_err = np.sqrt(p_s5max * (1.0 - p_s5max) / float(len(sim_ksp))) ps11max_err = np.sqrt(p_s11max *", "Monte Carlo Simulations ### of model 1: mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost, topt=fitpars1['popt'],", "optional, default True If True, several diagnostic plots will be saved to disk", "x > fitpars1['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x for x in sim_ksp if", "smaller, but it's a good idea to verify that the chains have mixed.", "# print('len(bmaxpow_sort) : ' + str(len(sim_bmaxpow_sort))) resfile('ninetyfiveperlim: ' + str(ninetyfiveperlim)) bmaxpow_ul = sim_bmaxpow_sort[ninetyfiveperlim]", "np.sqrt(p_s11max * (1.0 - p_s11max) / float(len(sim_ksp))) ### Display results on screen and", "LRTS and parameters in sim_lrt, sim_optpars, sim_qpopars, sim_deviance, sim_ksp, sim_merit, sim_srat = [],", "with power P=\" + str(probs[\"fitpars\"][\"s3max\"])) print(\"Bayesian p-value for the highest [3 bin smoothed]", "+ str( probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_s11max':", "== 'p_s11max': file.write(\"Bayesian p-value for the highest [11 bin smoothed] data/model outlier: \"", "in an observation/set of fake periodograms - search for QPOs via a model", "funky plots print(\"Bayesian p-value for deviance D = \" + str(p_deviance) + \"", "Parameters ---------- func : function Parametric model for the periodogram. 
Needs to be", "in enumerate(postpars[\"rhat\"]): print(\"The $R_hat$ value for Parameter \" + str(i) + \" is", "acceptance rate is \" + str(postpars[\"acceptance\"]) + \" .\") try: print(\"The autocorrelation times", "/ float(len(sim_maxpow)) p_deviance = float(len([x for x in sim_deviance if x > fitpars['deviance']]))", "float(len(sim_s3max)) p_s5max = float(len([x for x in sim_s5max if x > fitpars['s5max']])) /", "of averaging several individual periodograms (or bins), this changes the statistical distributions. Set", "the periodogram with func and compute the maximum-a-posteriori (MAP) estimate. Divide the data", "use ~10-20 and many samples For emcee, use as many as you can", "'p_s11max': [sim_s11max, p_s11max, ps11max_err], 'p_merit': [p_merit, pmerit_err], 'p_srat': [p_srat, psrat_err], 'p_deviance': [p_deviance, pdeviance_err],", "sim_bmaxpow_sort[ninetyfiveperlim] bindict['bmax' + str(b) + '_ul'] = bmaxpow_ul resfile('The posterior p-value for the", "utils.TwoPrint(resfilename) ### make strings for function names from function definition func1name = \"model1\"", "\" + str(p_maxpow) + \" +/- \" + str(pmaxpow_err)) # resfile('Upper limit on", "smoothed] data/model outlier: \" + str( probs[x][0]) + \" +/- \" + str(probs[x][1]))", "when saving output (text files and plots) plot: boolean, optional, default True If", "1)) plt.subplots_adjust(top=0.95, bottom=0.05, left=0.05, right=0.95, wspace=0.2, hspace=0.2) for i in range(N): ax =", "the periodogram and best fit models psfit.plotfits(fitpars1, fitpars2, namestr=self.namestr, log=True) if self.m ==", "periodograms - search for QPOs via a model selection approach using LRTs Parameters", "\" + str(probs[x][1]) + \"\\n\") elif x == 'p_merit': file.write( \"Bayesian p-value for", "/ (self.ps.df * b * self.ps.nphots ** 2.0) # print('len(binps.freq): ' + str(len(binps.freq)))", "+ 1 sim_psfit = mle.PerMaxLike(x, fitmethod='constbfgs', obs=False) slrt, soptpars, sqpopars = sim_psfit.find_qpo(func, ain,", "p_ksp = 
float(len([x for x in sim_ksp if x > fitpars['ksp']])) / float(len(sim_ksp))", "+ str(postpars['postquantiles'][i][0]) + \"\\t\" + str( postpars[\"postquantiles\"][i][1]) + \"\\n\") for x in probs.keys():", "F=\" + str( probs[\"fitpars\"][\"maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"maxpow\"])) print(\"Bayesian p-value", "for powers I_j depending on frequency binpowers = bmaxpow_ul * bintemplate / 2.0", "file.write(\"-- Posterior Summary of Parameters: \\n\") file.write(\"parameter \\t mean \\t\\t sd \\t\\t 5%", "print(\"Bayesian p-value for the highest [11 bin smoothed] data/model outlier: \" + str(", "few simulations to compute five percent limit reliably!') fiveperlim = 1 ninetyfiveperlim =", "+ str(fitpars['popt'])) bindicts = [x[\"bindict\"] for x in sim_pars_all] ### get out binned", "is \" + str(postpars[\"acceptance\"]) + \" .\\n\") try: file.write(\"The autocorrelation times are: \"", "+ str(summary['s11max_ul']) + \"\\n\") return def plot_posteriors(namestr='test', **pars): plotkeys = pars.keys() N =", "mle.PerMaxLike(x, fitmethod=fitmethod, obs=False) lrt = fitfake.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m) #", "continue ### Step 5: Compute Bayesian posterior probabilities of individual quantities p_maxpow =", "fitpars = psfit.mlest(func, par, obs=True, noise=noise, m=self.m) bindict = fitpars['bindict'] # print('popt: '", "that takes an array of frequencies and n parameters, and returns an array", "x in sim_s5max if x > fitpars['s5max']])) / float(len(sim_s5max)) p_s11max = float(len([x for", "\" + str(pmaxpow_err)) resfile(\"Bayesian p-value for deviance D = \" + str(p_deviance) +", "patches = plt.hist(sim_lrt, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4,", "compute significance via MCMCs. 
First, fit the periodogram with func and compute the", "fitpars1['merit']])) / float(len(sim_merit)) p_lrt = float(len([x for x in sim_lrt if x >", "p_maxpow) / float(len(sim_ksp))) pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance) / float(len(sim_ksp))) pksp_err", "\" + str(i) + \" is \" + str(x) + \"\\n\") ### print", "and compute significance via MCMCs. First, fit the periodogram with func and compute", "\" + str(probs[x][0]) + \" +/- \" + str( probs[x][1]) + \"\\n\") elif", "\" + str( probs[x][1]) + \"\\n\") elif x == 'p_maxpow': file.write(\"Bayesian p-value for", "for x in sim_deviance if x > fitpars1['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x", "b in bins[:nbins]: binps = fitpars['bindict']['bin' + str(b)] bmaxpow = np.array([x[\"bmax\" + str(b)]", "func2, par2, noise1=noise1, noise2=noise2, m=self.m) # resfile('Fitting of fake periodogram ' + str(i)", "[5 bin smoothed] data/model outlier: \" + str( probs[x][0]) + \" +/- \"", "else, this is n-trial corrected! 
# print('len(bmaxpow_sort) : ' + str(len(sim_bmaxpow_sort))) resfile('ninetyfiveperlim: '", "\"fitpars\" in probs.keys(): print(\"Highest [11 bin smoothed] data/model outlier at frequency F=\" +", "print('popt4: ' + str(fitpars['popt'])) bindicts = [x[\"bindict\"] for x in sim_pars_all] ### get", "$R_hat$ value for Parameter \" + str(i) + \" is \" + str(x)", "probs[x] = summary[x] else: postpars[x] = summary[x] picklefile = open(namestr + \"_summary_pickle.dat\", \"w\")", "class Bayes: Bayesian data analysis for time series # # This class defines", "m=self.m) # print('popt' + str(i) + 'c : ' + str(fitpars['popt'])) sim_pars_all.append(sim_pars) sim_deviance.append(sim_pars['deviance'])", "covfactor=1.0, parname=None, plotstr=None, use_emcee=True): \"\"\" Find QPOs by fitting a QPO + background", "bins = [1, 3, 5, 7, 10, 15, 20, 30, 50, 70, 100,", "str(maxpow_ul)) resfile(\"Bayesian p-value for maximum power P_max = \" + str(p_s3max) + \"", "2: Set up Markov Chain Monte Carlo Simulations ### of model 1: mcobs", "the maximum-a-posteriori paramters. If func1 and func2 differ in complexity, the less complex", "range(len(postpars['postmean'])): print(\"theta[\" + str(i) + \"] \\t \" + str(postpars['postmean'][i]) + \"\\t\" +", "+ \" +/- \" + str(probs[x][1]) + \"\\n\") elif x == 'p_ksp': file.write(\"Bayesian", "optional, default 5000 Sets the length of the Markov chains. For Metropolis-Hastings, this", "+ \"\\n\") file.write( \"Upper limit for highest [unsmoothed] data/model outlier: \" + str(summary['s11max_ul'])", "can be explained sufficiently with the simpler model. 
Parameters ---------- func1 : function", "p_ksp) / float(len(sim_ksp))) pmerit_err = np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp))) plrt_err", "+ str(postpars[\"acceptance\"]) + \" .\\n\") try: file.write(\"The autocorrelation times are: \" + str(postpars[\"acor\"])", "funcfake: try: simno = simno + 1 sim_psfit = mle.PerMaxLike(x, fitmethod='constbfgs', obs=False) slrt,", "averaging several individual periodograms (or bins), this changes the statistical distributions. Set m", "+ str(summary['s3max_ul']) + \"\\n\") elif x == 'p_s5max': file.write(\"Bayesian p-value for the highest", "P=\" + str(probs[\"fitpars\"][\"s3max\"])) print(\"Bayesian p-value for the highest [3 bin smoothed] data/model outlier:", "isinstance(ps, powerspectrum.PowerSpectrum), \"ps must be of type powerspectrum.PowerSpectrum!\" self.ps = ps self.namestr =", "for simulated quantities of interest: sim_lrt, sim_deviance, sim_ksp, sim_maxpow, sim_merit, sim_fpeak, sim_y0, sim_srat", "for x in sim_srat if x > fitpars1['sobs']])) / float(len(sim_srat)) resfile('simulated srat: '", "the default (bfgs) should be sufficient for most applications. nchain : int, optional,", "background level, and this parameter should be last! 
par1 : {list, array-like} Input", "use_emcee=True, plot=self.plot, m=self.m) ### find optimum QPO values for the real data obslrt,", "\"\\t\" + str(postpars['postquantiles'][i][0]) + \"\\t\" + str( postpars[\"postquantiles\"][i][1]) + \"\\n\") for x in", "can be smaller, but it's a good idea to verify that the chains", "> fitpars['maxpow']])) / float(len(sim_maxpow)) p_deviance = float(len([x for x in sim_deviance if x", "posterior.StackPerPosterior(self.ps, func1, self.m) ### Step 2: Set up Markov Chain Monte Carlo Simulations", "likelihood ratio tests # - find periodicities by picking out the largest power", "is \" + str(postpars[\"acceptance\"]) + \" .\") try: print(\"The autocorrelation times are: \"", "+ \" +/- \" + str(probs[x][1])) elif x == 'p_maxpow': if \"fitpars\" in", "use_emcee=True): \"\"\" Find QPOs by fitting a QPO + background model to *every*", "MCMCs fakeper = mcobs.simulate_periodogram(nsim=nsim) ### empty lists for simulated quantities of interest: sim_lrt,", "m to the number of periodograms averaged to be sure to use the", "obs=True, plotname=self.namestr + '_loglikes') ### simulate lots of realizations of the broadband noise", "at frequency F=\" + str( probs[\"fitpars\"][\"s3maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"s3max\"]))", "= \" + str(np.mean(sim_deviance))) print(\"KSP(obs) = \" + str(optpars['ksp'])) print(\"mean(sim_ksp) = \" +", "- p_merit) / float(len(sim_ksp))) plrt_err = np.sqrt(p_lrt * (1.0 - p_lrt) / float(len(sim_ksp)))", "[unsmoothed] data/model outlier: \" + str( probs[x][0]) + \" +/- \" + str(probs[x][1])", "mcmc from src.SpectralAnalysis import mle from src.SpectralAnalysis import posterior ########################################## # # class", "except KeyError: file.write(\"Module Acor not found. 
Cannot compute autocorrelation times for the parameters", ": string, optional, default \"bfgs\" Choose the optimization algorithm used when minimizing the", "of model 1: mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost, topt=fitpars1['popt'], tcov=fitpars1['cov'], covfactor=covfactor, niter=niter, nchain=nchain,", "p-value for the highest [11 bin smoothed] data/model outlier: \" + str( probs[x][0])", "likelihood ratios such that it is possible to build up a posterior distribution", "Bayesian time series analysis This class defines a Bayes object that can: -", "to be searched for QPOs namestr: string, optional, default \"test\" The string that", "p-value for Merit function: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])", "\" + str(p_merit) + \" +/- \" + str(pmerit_err)) resfile(\"Bayesian p-value for the", "/ float(len(sim_ksp))) ### Display results on screen and make funky plots resfile(\"Bayesian p-value", "def find_qpo(self, func, ain, fitmethod='constbfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, parname=None, plotstr=None, use_emcee=True): \"\"\"", "[unsmoothed] data/model outlier: \" + str(summary['maxpow_ul']) + \"\\n\") elif x == 'p_s3max': file.write(\"Bayesian", "nsim=1000, covfactor=1.0, parname=None, noise=-1, use_emcee=True, searchfreq=None): \"\"\" Find periodicities in observed data and", "= \" + str(np.mean(sim_merit))) resfile(\"Srat(obs) = \" + str(fitpars1['sobs'])) resfile(\"mean(sim_srat) = \" +", "elif x == 'p_s3max': if \"fitpars\" in probs.keys(): print(\"Highest [3 bin smoothed] data/model", "(self.ps.df * b * self.ps.nphots ** 2.0) # print('len(binps.freq): ' + str(len(binps.freq))) #", "at frequency F=\" + str( probs[\"fitpars\"][\"s5maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"s5max\"]))", "str(s5max_ul)) resfile(\"Bayesian p-value for maximum power P_max = \" + str(p_s11max) + \"", "for running MCMC. If False, use Metropolis-Hastings. 
parname : list, optional, default None", "str(optpars['merit'])) print(\"mean(sim_merit) = \" + str(np.mean(sim_merit))) print(\"Srat(obs) = \" + str(optpars['sobs'])) print(\"mean(sim_srat) =", "\"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return results def find_qpo(self, func, ain, fitmethod='constbfgs', nchain=10, niter=5000,", "Set m to the number of periodograms averaged to be sure to use", "names from function definition func1name = \"model1\" func2name = \"model2\" ### step 1:", "str(ninetyfiveperlim)) bmaxpow_ul = sim_bmaxpow_sort[ninetyfiveperlim] bindict['bmax' + str(b) + '_ul'] = bmaxpow_ul resfile('The posterior", "selection approach using LRTs # # # TO DO: Need to add smoothing", "= self.namestr + \"_findperiodicity_results.dat\" ## open the output log file resfile = utils.TwoPrint(resfilename)", "/ float(len(sim_ksp))) ### Display results on screen and make funky plots print(\"Bayesian p-value", "the posterior distribution of parameters for func using MCMC, and create fake periodograms", "the result of averaging several individual periodograms (or bins), this changes the statistical", "funky plots resfile(\"Bayesian p-value for maximum power P_max = \" + str(p_maxpow) +", "namestr self.plot = plot self.m = m def choose_noise_model(self, func1, par1, func2, par2,", "+ str(b)] = bmaxpow p_bmaxpow = float(len([x for x in bmaxpow if x", "* (1.0 - p_s3max) / float(len(sim_ksp))) ps5max_err = np.sqrt(p_s5max * (1.0 - p_s5max)", "Summary of Parameters: \\n\") print(\"parameter \\t mean \\t\\t sd \\t\\t 5% \\t\\t 95%", "### of model 1: mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost, topt=fitpars['popt'], tcov=fitpars['cov'], covfactor=covfactor, niter=niter,", "limit on the T_R statistic is 2I/S = ' + str(bmaxpow_ul)) ### now", "sim_lrt, sim_deviance, sim_ksp, sim_maxpow, sim_merit, sim_fpeak, sim_y0, sim_srat = [], [], [], [],", "# print('popt' + str(i) + 'a : ' + str(fitpars['popt'])) fitfake = mle.PerMaxLike(x,", "model for 
binned frequencies bintemplate = func(fitpars['bindict']['bin' + str(b)].freq, *fitpars['popt']) resfile(\"bintemplate[0]: \" +", "a binning of ' + str( self.ps.df * b) + 'Hz is p", "in sim_s3max if x > fitpars['s3max']])) / float(len(sim_s3max)) p_s5max = float(len([x for x", "Parameters ---------- func1 : function Parametric model for the periodogram. Needs to be", "for x in sim_deviance if x > optpars['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x", "float(len(sim_ksp)) p_merit = float(len([x for x in sim_merit if x > optpars['merit']])) /", "parameters for each for x in funcfake: try: simno = simno + 1", "+ str(probs[\"fitpars\"][\"s11max\"])) print(\"Bayesian p-value for the highest [11 bin smoothed] data/model outlier: \"", "KeyboardInterrupt: break ### Step 5: Compute Bayesian posterior probabilities of individual quantities p_deviance", "plt.hist(sim_lrt, bins=100, normed=True, histtype='stepfilled') plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4, color='m') plt.savefig(self.namestr +", "print(\"Bayesian p-value for the highest [5 bin smoothed] data/model outlier: \" + str(", "of Bayesian posterior probabilities pmaxpow_err = np.sqrt(p_maxpow * (1.0 - p_maxpow) / float(len(sim_ksp)))", "for highest [unsmoothed] data/model outlier: \" + str(summary['s11max_ul']) + \"\\n\") return def plot_posteriors(namestr='test',", "likelihood ratio at the maximum-a-posteriori paramters. 
If func1 and func2 differ in complexity,", "p_lrt = float(len([x for x in sim_lrt if x > obslrt])) / float(len(sim_lrt))", "- p_s11max) / float(len(sim_ksp))) ### Display results on screen and make funky plots", "p_merit = float(len([x for x in sim_merit if x > optpars['merit']])) / float(len(sim_merit))", "None Include a list of strings here to set parameter names for plotting", "x in sim_maxpow if x > fitpars1['maxpow']])) / float(len(sim_maxpow)) p_deviance = float(len([x for", "from function definition func1name = \"model1\" func2name = \"model2\" ### step 1: fit", "use the emcee package for running MCMC. If False, use Metropolis-Hastings. \"\"\" ##", "P_max = \" + str(p_maxpow) + \" +/- \" + str(pmaxpow_err)) resfile(\"Bayesian p-value", "corrected! # print('len(bmaxpow_sort) : ' + str(len(sim_bmaxpow_sort))) resfile('ninetyfiveperlim: ' + str(ninetyfiveperlim)) bmaxpow_ul =", "Simulations ### of model 1: mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost, topt=fitpars['popt'], tcov=fitpars['cov'], covfactor=covfactor,", "searchfreq is None: searchfreq = [40.0, 70.0, 100.0, 300.0, 500.0, 1000.0] ## for", "np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp))) psrat_err = np.sqrt(p_srat * (1.0 -", "(1.0 - p_s11max) / float(len(sim_ksp))) ### Display results on screen and make funky", "sim_srat.append(sim_pars['sobs']) sim_s3max.append(sim_pars['s3max']) sim_s5max.append(sim_pars['s5max']) sim_s11max.append(sim_pars['s11max']) except KeyboardInterrupt: break # except: # print(\"Simulation failed! Continuing", "search for QPOs via a model selection approach using LRTs # # #", "pdeviance_err], \"p_ksp\": [p_ksp, pksp_err], \"p_merit\": [p_merit, pmerit_err], \"p_srat\": [p_srat, psrat_err], \"postmean\": mcobs.mean, \"posterr\":", ": float, optional, default 1.0 A tuning parameter for the MCMC step. 
Used", "None: searchfreq = [40.0, 70.0, 100.0, 300.0, 500.0, 1000.0] ## for 40 Hz:", "of parameters for func using MCMC, and create fake periodograms from samples of", "Metropolis-Hastings. \"\"\" if plotstr == None: plotstr = self.namestr funcname = str(func).split()[1] #", "fitpars['s3max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s11max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed", "but it's a good idea to verify that the chains have mixed. nsim", "fiveperlim = 1 ninetyfiveperlim = len(sim_maxpow) - fiveperlim # print('popt4: ' + str(fitpars['popt']))", "\"w\") file.write(\"The ensemble acceptance rate is \" + str(postpars[\"acceptance\"]) + \" .\\n\") try:", "for x in sim_deviance if x > fitpars['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x", "== 'leahy': binpowers = binpowers / (self.ps.df * b * self.ps.nphots) elif self.ps.norm", "model from MCMCs funcfake = mcobs.simulate_periodogram(nsim=nsim) ### empty lists to store simulated LRTS", "+ str(p_srat) + \" +/- \" + str(psrat_err)) print(\"Bayesian p-value for Likelihood Ratio:", "elif x == 'p_s5max': file.write(\"Bayesian p-value for the highest [5 bin smoothed] data/model", "+ str(ps5max_err)) # resfile('Upper limit on maximum signal power P_max_ul = ' +", "* n, \"p = \" + str(pars[plotkeys[i]][1])) ax.title(\"Posterior for \" + plotkeys[i]) return", "MCMCs funcfake = mcobs.simulate_periodogram(nsim=nsim) ### empty lists to store simulated LRTS and parameters", "model; for a perfect data-model fit, the resulting residuals should follow a chi-square", "via a model selection approach using LRTs # # # TO DO: Need", "the emcee package for running MCMC. If False, use Metropolis-Hastings. \"\"\" ## the", "\"p_maxpow\": [p_maxpow, pmaxpow_err], \"p_deviance\": [p_deviance, pdeviance_err], \"p_ksp\": [p_ksp, pksp_err], \"p_merit\": [p_merit, pmerit_err], \"p_srat\":", "which posterior do I need to use? 
if self.m == 1: lpost =", "out of MCMCs fakeper = mcobs.simulate_periodogram(nsim=nsim) sim_pars_all, sim_deviance, sim_ksp, sim_fpeak, sim_srat, \\ sim_maxpow,", "src.SpectralAnalysis import utils from src.SpectralAnalysis import powerspectrum from src.SpectralAnalysis import mcmc from src.SpectralAnalysis", "QPO). Parameters ---------- func : function Parametric model for the periodogram. Needs to", "- find periodicities by picking out the largest power in # an observation/set", "par2, noise1=noise1, noise2=noise2, m=self.m) # resfile('Fitting of fake periodogram ' + str(i) +", "fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False) lrt = fitfake.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2,", "the maximum precision of the posterior predictive p-value (for 1000 simulations, the p-value", "fitfake.mlest(func, sain, obs=False, noise=noise, m=self.m) # print('popt' + str(i) + 'c : '", "+ str(pmaxpow_err)) # resfile('Upper limit on maximum signal power P_max_ul = ' +", "\" + str(len(self.ps.ps))) if self.m == 1: lpost = posterior.PerPosterior(self.ps, func) else: lpost", "'p_deviance': [p_deviance, pdeviance_err], 'fitpars': fitpars, \"postmean\": mcobs.mean, \"posterr\": mcobs.std, \"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat,", "0.0, max(n)]) plt.vlines(fitpars['s3max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (3) data', fontsize=12) plt.subplot(2, 2,", "the MAP model; for a perfect data-model fit, the resulting residuals should follow", "for maximum power P_max = \" + str(p_s5max) + \" +/- \" +", "max(n)]) plt.vlines(fitpars['s5max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (5) data/model outlier', fontsize=12) plt.subplot(2, 2,", "== 0: resfile('Warning! Too few simulations to compute five percent limit reliably!') fiveperlim", "compute the maximum-a-posteriori (MAP) estimate. 
Divide the data by the MAP model; for", "P=\" + str(probs[\"fitpars\"][\"s5max\"])) print(\"Bayesian p-value for the highest [5 bin smoothed] data/model outlier:", "bottom=0.05, left=0.05, right=0.95, wspace=0.2, hspace=0.2) for i in range(N): ax = fig.add_subplot(N /", "* b * self.ps.nphots ** 2.0) # print('len(binps.freq): ' + str(len(binps.freq))) # print('len(binpowers):", "### find optimum QPO values for the real data obslrt, optpars, qpopars =", "elif x == 'p_deviance': print(\"Bayesian p-value for deviance D = \" + str(probs[x][0])", "' + str(fitpars['popt'])) bindicts = [x[\"bindict\"] for x in sim_pars_all] ### get out", "False, use Metropolis-Hastings. \"\"\" if plotstr == None: plotstr = self.namestr funcname =", "upper limit on the power at ' + str(bc) + 'Hz for a", "+ str(bpow * (self.ps.df * b * self.ps.nphots))) resfile('The upper limit on the", "bmaxpow_ul resfile('The posterior p-value for the maximum residual power for a binning of", "tuning parameter for the MCMC step. Used only in Metropolis-Hastings. parname : list,", "np.msort(bmaxpow) ### note: this is the limit for 2*I/S --> multiply by S", "a dictionary!\") probs = dict() postpars = dict() ### sort out p-values and", "its frequency. 
Sample the posterior distribution of parameters for func using MCMC, and", "posterior distribution of parameters for func using MCMC, and create fake periodograms from", "\" + str(summary['maxpow_ul']) + \"\\n\") elif x == 'p_s3max': file.write(\"Bayesian p-value for the", "- p_srat) / float(len(sim_ksp))) ps3max_err = np.sqrt(p_s3max * (1.0 - p_s3max) / float(len(sim_ksp)))", "+ \"\\n\") elif x == 'p_s11max': file.write(\"Bayesian p-value for the highest [11 bin", "> fitpars1['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for x in sim_merit if x", "sim_merit if x > fitpars1['merit']])) / float(len(sim_merit)) p_lrt = float(len([x for x in", "0.0, max(n), lw=2, color='navy') plt.title('smoothed (5) data/model outlier', fontsize=12) plt.subplot(2, 2, 4) n,", "sim_fpeak.append(sim_pars1['maxfreq']) sim_y0.append(sim_pars1['mfit'][sim_pars1['maxind']]) sim_srat.append(sim_pars1['sobs']) except KeyboardInterrupt: break if len(sim_maxpow) == 0: resfile(\"Analysis of Burst", "LRT psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) obslrt = psfit.compute_lrt(func1, par1, func2, par2, noise1=noise1,", "nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=True, plot=self.plot, m=self.m) ### find optimum QPO values for", "Compute Bayesian posterior probabilities of individual quantities p_deviance = float(len([x for x in", "from src.SpectralAnalysis import mle from src.SpectralAnalysis import posterior ########################################## # # class Bayes:", "= str(func).split()[1] # print(\"<< --- len(self.ps beginning): \" + str(len(self.ps.ps))) ### step 1:", "powers and compute a posterior predictive p-value of seeing the maximum power in", "mcobs.std, \"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return summary def print_summary(self,", "'Hz is p = ' + str(p_bmaxpow) + ' +/- ' + str(bmaxpow_err))", "maxpows_all = {} binprob = {} for b in bins[:nbins]: binps = 
fitpars['bindict']['bin'", "outlier: \" + str(summary['s11max_ul']) + \"\\n\") return def plot_posteriors(namestr='test', **pars): plotkeys = pars.keys()", "print(\"Bayesian p-value for Likelihood Ratio: \" + str(p_lrt) + \" +/- \" +", "should be last! par1 : {list, array-like} Input guesses for the MAP fit", "Find periodicities in observed data and compute significance via MCMCs. First, fit the", "summary of parameters: file.write(\"-- Posterior Summary of Parameters: \\n\") file.write(\"parameter \\t mean \\t\\t", "the parameters\") for i, x in enumerate(postpars[\"rhat\"]): print(\"The $R_hat$ value for Parameter \"", "+ str(fitpars['popt'])) ### upper limit is the power in the sorted array where", "3, 5, 7, 10, 15, 20, 30, 50, 70, 100, 200, 300, 500,", "self.ps.freq[0]))) bins = [1, 3, 5, 7, 10, 15, 20, 30, 50, 70,", "will be used to identify this periodogram when saving output (text files and", "+ str(fitpars['popt'])) fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False) # print('popt' + str(i) + 'b", "np.sqrt(p_s3max * (1.0 - p_s3max) / float(len(sim_ksp))) ps5max_err = np.sqrt(p_s5max * (1.0 -", "\"\"\" Bayesian time series analysis This class defines a Bayes object that can:", "probabilities of individual quantities p_deviance = float(len([x for x in sim_deviance if x", "if x > optpars['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x for x in sim_ksp", "'p_lrt': print(\"Bayesian p-value for Likelihood Ratio: \" + str(probs[x][0]) + \" +/- \"", "if x > fitpars['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for x in sim_merit", "for running MCMC. If False, use Metropolis-Hastings. \"\"\" if plotstr == None: plotstr", "LRTs # # # TO DO: Need to add smoothing for picking out", "Step 5: Compute Bayesian posterior probabilities of individual quantities p_maxpow = float(len([x for", "parameter should be last! 
par2 : {list, array-like} Input guesses for the MAP", "for deviance D = \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])", "ps11max_err = np.sqrt(p_s11max * (1.0 - p_s11max) / float(len(sim_ksp))) ### Display results on", "1 sim_psfit = mle.PerMaxLike(x, fitmethod='constbfgs', obs=False) slrt, soptpars, sqpopars = sim_psfit.find_qpo(func, ain, obs=False,", "Likelihood Ratio: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1]) + \"\\n\")", "b * self.ps.nphots) elif self.ps.norm == 'variance': binpowers = binpowers * self.ps.n **", "getattr(fitfake, func1name + 'fit') sim_pars2 = getattr(fitfake, func2name + 'fit') # if lrt", "> optpars['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for x in sim_merit if x", "+ \" +/- \" + str(pmerit_err)) print(\"Bayesian p-value for the np.sum of residuals:", "1000 simulations, the p-value can be constrained only to 0.001). covfactor : float,", "constrained only to 0.001). covfactor : float, optional, default 1.0 A tuning parameter", "find periodicities by picking out the largest power in # an observation/set of", "+ \" +/- \" + str(pksp_err)) resfile(\"Bayesian p-value for Merit function: \" +", "1.0 A tuning parameter for the MCMC step. Used only in Metropolis-Hastings. use_emcee", "Hz ## first, convert powers into rms normalization, if they're not already if", "times are: \" + str(postpars[\"acor\"])) except KeyError: print(\"Module Acor not found. 
Cannot compute", "nchain=10, niter=5000, nsim=1000, covfactor=1.0, parname=None, noise=-1, use_emcee=True, searchfreq=None): \"\"\" Find periodicities in observed", "simulated LRTS and parameters in sim_lrt, sim_optpars, sim_qpopars, sim_deviance, sim_ksp, sim_merit, sim_srat =", "Display results on screen and make funky plots resfile(\"Bayesian p-value for maximum power", "if x > fitpars['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x for x in sim_ksp", "to get powers for each frequency ### Like everything else, this is n-trial", "plotstr == None: plotstr = self.namestr funcname = str(func).split()[1] # print(\"<< --- len(self.ps", "powers. The function should include a parameter setting a constant background level, and", "rms amplitude at ' + str(bc) + 'Hz for a binning of '", "nchain : int, optional, default 10 The number of chains or walkers to", "and this parameter should be last! par2 : {list, array-like} Input guesses for", "MCMC. For Metropolis-Hastings, use ~10-20 and many samples For emcee, use as many", "\"Hz with power P=\" + str(probs[\"fitpars\"][\"s3max\"])) print(\"Bayesian p-value for the highest [3 bin", "sim_merit.append(sim_pars1['merit']) sim_fpeak.append(sim_pars1['maxfreq']) sim_y0.append(sim_pars1['mfit'][sim_pars1['maxind']]) sim_srat.append(sim_pars1['sobs']) except KeyboardInterrupt: break if len(sim_maxpow) == 0: resfile(\"Analysis of", "= len(binlist) / 4 sain = copy.copy(fitpars['popt']) # print('popt2: ' + str(fitpars['popt'])) ###", "+/- \" + str(probs[x][1])) elif x == 'p_srat': print(\"Bayesian p-value for the sum", "the highest [5 bin smoothed] data/model outlier: \" + str( probs[x][0]) + \"", "optional, default 1 If the periodogram used is the result of averaging several", "* max(n), lw=4, color='m') plt.savefig(self.namestr + '_qpolrt.png', format='png') plt.close() summary = {\"p_lrt\": [p_lrt,", "### note: sometimes simulations fail, therefore the 5% limit should be 0.05*len(sims) fiveperlim", "\"acceptance\": 
mcobs.acceptance} return results def find_qpo(self, func, ain, fitmethod='constbfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0,", "== 'p_s3max': file.write(\"Bayesian p-value for the highest [3 bin smoothed] data/model outlier: \"", "max(n)]) plt.vlines(fitpars['maxpow'], 0.0, max(n), lw=2, color='navy') plt.title('unsmoothed data', fontsize=12) plt.subplot(2, 2, 2) n,", "\" + str(probs[x][1])) elif x == 'p_s11max': if \"fitpars\" in probs.keys(): print(\"Highest [11", "F=\" + str( probs[\"fitpars\"][\"s5maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"s5max\"])) print(\"Bayesian p-value", "+/- \" + str( probs[x][1]) + \"\\n\") elif x == 'p_maxpow': file.write(\"Bayesian p-value", "parameters for x in keys: if x[:2] == 'p_': probs[x] = summary[x] else:", "float(len(sim_ksp))) pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance) / float(len(sim_ksp))) pksp_err = np.sqrt(p_ksp", "Bayesian posterior probabilities of individual quantities p_maxpow = float(len([x for x in sim_maxpow", "also sets the maximum precision of the posterior predictive p-value (for 1000 simulations,", "m=self.m) ### get out best fit parameters and associated quantities fitpars1 = getattr(psfit,", "upper limit is the power in the sorted array where p_maxpow would be", "+/- \" + str(ps3max_err)) # resfile('Upper limit on maximum signal power P_max_ul =", "### Step 3: create fake periodograms out of MCMCs fakeper = mcobs.simulate_periodogram(nsim=nsim) ###", "= dict() postpars = dict() ### sort out p-values and posterior distribution of", "not found. 
Cannot compute autocorrelation times for the parameters\") for i, x in", "file.write( \"Upper limit for highest [unsmoothed] data/model outlier: \" + str(summary['s11max_ul']) + \"\\n\")", "= getattr(fitfake, func2name + 'fit') # if lrt > 20: # fitfake.plotfits(sim_pars1, sim_pars2,", "on the rms amplitude at ' + str(bc) + 'Hz for a binning", "for KS test: \" + str(p_ksp) + \" +/- \" + str(pksp_err)) print(\"Bayesian", "powers from lowest to highest sim_maxpow_sort = np.msort(sim_maxpow) sim_s3max_sort = np.msort(sim_s3max) sim_s5max_sort =", "is the result of averaging several individual periodograms (or bins), this changes the", "str(fitpars['bindict'][\"bmax\" + str(b)])) resfile('The upper limit on the T_R statistic is 2I/S =", "large (>10000) For emcee, this can be smaller, but it's a good idea", "### step 1: fit both models to observation and compute LRT psfit =", "list or array must match the number of parameters k taken by func.", "...\") # continue # print('popt' + str(i) + 'd : ' + str(fitpars['popt']))", "failed! 
Returning ...\") return False, False, False else: ### Step 5: Compute Bayesian", "* b * self.ps.nphots) elif self.ps.norm == 'variance': binpowers = binpowers * self.ps.n", "maximum power P_max = \" + str(p_s3max) + \" +/- \" + str(ps3max_err))", "m=self.m) # print(\"<< --- len(self.ps beginning): \" + str(len(self.ps.ps))) if self.m == 1:", "sim_lrt.append(slrt) sim_optpars.append(soptpars) sim_qpopars.append(sqpopars) sim_deviance.append(soptpars['deviance']) sim_ksp.append(soptpars['ksp']) sim_merit.append(soptpars['merit']) sim_srat.append(soptpars['sobs']) except KeyboardInterrupt: break ### Step 5:", "+ str( probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_s3max':", "first, convert powers into rms normalization, if they're not already if self.ps.norm ==", "x > optpars['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for x in sim_merit if", "file.write(\"Bayesian p-value for the highest [5 bin smoothed] data/model outlier: \" + str(", "x in keys: if x[:2] == 'p_': probs[x] = summary[x] else: postpars[x] =", "# resfile('Upper limit on maximum signal power P_max_ul = ' + str(s11max_ul)) resfile(\"Bayesian", "is 2I/S = ' + str(fitpars['bindict'][\"bmax\" + str(b)])) resfile('The upper limit on the", "\" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_ksp':", "array-like} Input guesses for the MAP fit using func1. The number of elements", "= float(len([x for x in sim_s3max if x > fitpars['s3max']])) / float(len(sim_s3max)) p_s5max", "noise model for binned frequencies bintemplate = func(fitpars['bindict']['bin' + str(b)].freq, *fitpars['popt']) resfile(\"bintemplate[0]: \"", "= ' + str(p_bmaxpow) + ' +/- ' + str(bmaxpow_err)) resfile('The corresponding value", "'p_maxpow': if \"fitpars\" in probs.keys(): print(\"Highest [unsmoothed] data/model outlier at frequency F=\" +", "noise parameter in func1 and func2. 
In the pre-defined models, this index is", "Parameters ---------- ps : powerspectrum.Powerspectrum A periodogram object that is to be searched", "k parameters, and returns an array of model powers. The function should include", "the highest [11 bin smoothed] data/model outlier: \" + str( probs[x][0]) + \"", "object that can: # - pick between two models using likelihood ratio tests", "str(pmaxpow_err)) resfile(\"Bayesian p-value for deviance D = \" + str(p_deviance) + \" +/-", "resfile('Upper limit on maximum signal power P_max_ul = ' + str(s3max_ul)) resfile(\"Bayesian p-value", "+ str(optpars['merit'])) print(\"mean(sim_merit) = \" + str(np.mean(sim_merit))) print(\"Srat(obs) = \" + str(optpars['sobs'])) print(\"mean(sim_srat)", "open(namestr + \"_summary.dat\", \"w\") file.write(\"The ensemble acceptance rate is \" + str(postpars[\"acceptance\"]) +", "check_conv=True, namestr=self.namestr, use_emcee=use_emcee, plot=self.plot, printobj=resfile, m=self.m) ### Step 3: create fake periodograms out", "if x > optpars['sobs']])) / float(len(sim_srat)) print(\"p(LRT) = \" + str(p_lrt)) # print(\"LRT(obs)", "= plt.hist(sim_lrt, bins=100, normed=True, histtype='stepfilled') plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4, color='m') plt.savefig(self.namestr", "to file. NOT USED! 
:param summary: :param namestr: :return: \"\"\" if not namestr:", "probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_s3max': if \"fitpars\"", "in sim_maxpow if x > fitpars['maxpow']])) / float(len(sim_maxpow)) p_deviance = float(len([x for x", "+ \" +/- \" + str(probs[x][1]) + \"\\n\") file.write( \"Upper limit for highest", "soptpars, sqpopars = sim_psfit.find_qpo(func, ain, obs=False, plot=True, plotname=plotstr + '_sim' + str(simno) +", "lpost = posterior.PerPosterior(self.ps, func1) else: lpost = posterior.StackPerPosterior(self.ps, func1, self.m) ### Step 2:", "3) n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax =", "nsim=1000, covfactor=1.0, parname=None, plotstr=None, use_emcee=True): \"\"\" Find QPOs by fitting a QPO +", "the data under the null hypothesis (no QPO). Parameters ---------- func : function", "patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s3max']) /", "ratio tests - find periodicities by picking out the largest power in an", "plt.subplot(2, 2, 1) n, bins, patches = plt.hist(sim_maxpow, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin,", "\" +/- \" + str(psrat_err)) if self.plot: plt.subplot(2, 2, 1) n, bins, patches", "+ str(b)].freq, *fitpars['popt']) resfile(\"bintemplate[0]: \" + str(bintemplate[0])) ## then compute upper limits for", "p_merit) / float(len(sim_ksp))) plrt_err = np.sqrt(p_lrt * (1.0 - p_lrt) / float(len(sim_ksp))) psrat_err", "if they're not already if self.ps.norm == 'leahy': binpowers = binpowers / (self.ps.df", "use this because it's really computationally expensive. Parameters ---------- func : function Parametric", "\" +/- \" + str(probs[x][1])) return def write_summary(self, summary, namestr=None): \"\"\" Write a", "\" + str(postpars[\"acor\"])) except KeyError: print(\"Module Acor not found. 
Cannot compute autocorrelation times", "data, and compute the likelihood ratios such that it is possible to build", "## for 40 Hz: print(searchfreq) for bc in searchfreq: if bc > (binps.freq[1]", "format='png') plt.close() summary = {\"p_lrt\": [p_lrt, plrt_err], \"p_maxpow\": [p_maxpow, pmaxpow_err], \"p_deviance\": [p_deviance, pdeviance_err],", "optional, default \"bfgs\" Choose the optimization algorithm used when minimizing the -log-likelihood. Choices", "binpowers[bind] brms = np.sqrt(bpow * b * self.ps.df) resfile('The upper limit on the", "15, 20, 30, 50, 70, 100, 200, 300, 500, 700, 1000] binlist =", "power P_max = \" + str(p_s3max) + \" +/- \" + str(ps3max_err)) #", "keys: if x[:2] == 'p_': probs[x] = summary[x] else: postpars[x] = summary[x] print(\"The", "screen and make funky plots resfile(\"Bayesian p-value for maximum power P_max = \"", "5, 7, 10, 15, 20, 30, 50, 70, 100, 200, 300, 500, 700,", "pmerit_err = np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp))) plrt_err = np.sqrt(p_lrt *", "+ str(p_maxpow) + \" +/- \" + str(pmaxpow_err)) # resfile('Upper limit on maximum", "'p_s5max': file.write(\"Bayesian p-value for the highest [5 bin smoothed] data/model outlier: \" +", "+ \" +/- \" + str(pdeviance_err)) print(\"Bayesian p-value for KS test: \" +", "the analysis to file. NOT USED! 
:param summary: :param namestr: :return: \"\"\" if", "(3) data', fontsize=12) plt.subplot(2, 2, 3) n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True,", "+ '_qpolrt.png', format='png') plt.close() summary = {\"p_lrt\": [p_lrt, plrt_err], \"p_deviance\": [p_deviance, pdeviance_err], \"p_ksp\":", "powerspectrum.PowerSpectrum!\" self.ps = ps self.namestr = namestr self.plot = plot self.m = m", "self.ps.nphots ** 2.0) # print('len(binps.freq): ' + str(len(binps.freq))) # print('len(binpowers): ' + str(len(binpowers)))", "- pick between two models using likelihood ratio tests - find periodicities by", "upper limit on the rms amplitude at ' + str(bc) + 'Hz for", "\"\"\" ## the file name where the output will be stored resfilename =", "+ str(fitpars['popt'])) # print('popt3: ' + str(fitpars['popt'])) ### upper limit is the power", "sets from the posterior to create fake periodograms. Fit each fake periodogram with", "model 1: mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost, topt=fitpars['popt'], tcov=fitpars['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname,", "in enumerate(fakeper): try: fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False) lrt = fitfake.compute_lrt(func1, par1, func2,", "psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) fitpars = psfit.mlest(func, par, obs=True, noise=noise, m=self.m) bindict", "str(b)] for x in bindicts]) maxpows_all[\"bin\" + str(b)] = bmaxpow bindict['sim_bmaxpow' + str(b)]", "postpars[\"postquantiles\"][i][1]) + \"\\n\") for x in probs.keys(): if x == 'p_lrt': file.write( \"Bayesian", "# # class Bayes(object): \"\"\" Bayesian time series analysis This class defines a", "xmin, xmax = min(min(bins), fitpars['s3max']) / 1.2, max(25, fitpars['s3max'] * 1.2) plt.axis([xmin, xmax,", "str(b)])) resfile('The upper limit on the T_R statistic is 2I/S = ' +", "break if len(sim_maxpow) == 0: resfile(\"Analysis of Burst failed! 
Returning ...\") return False,", "returns an array of model powers The function should include a parameter setting", "Need to add smoothing for picking out narrow signals # # # class", "float(len(sim_s11max)) ### sort maximum powers from lowest to highest sim_maxpow_sort = np.msort(sim_maxpow) sim_s3max_sort", "\"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return summary def print_summary(self, summary): \"\"\" Print a summary", "optpars, qpopars = psfit.find_qpo(func, ain, plot=True, obs=True, plotname=self.namestr + '_loglikes') ### simulate lots", "\" + str(bintemplate[0])) ## then compute upper limits for powers I_j depending on", "P=\" + str(probs[\"fitpars\"][\"s11max\"])) print(\"Bayesian p-value for the highest [11 bin smoothed] data/model outlier:", "pickle import copy import numpy as np from src.SpectralAnalysis import utils from src.SpectralAnalysis", "file.write(\"The autocorrelation times are: \" + str(postpars[\"acor\"]) + \"\\n\") except KeyError: file.write(\"Module Acor", "float(len([x for x in sim_merit if x > fitpars1['merit']])) / float(len(sim_merit)) p_lrt =", "Step 4: Fit fake periodograms: for i, x in enumerate(fakeper): try: # print('popt'", "for the parameters taken by func. 
The number of elements in this list", "import mcmc from src.SpectralAnalysis import mle from src.SpectralAnalysis import posterior ########################################## # #", "to be large (>10000) For emcee, this can be smaller, but it's a", "mle.PerMaxLike(x, fitmethod=fitmethod, obs=False) # print('popt' + str(i) + 'b : ' + str(fitpars['popt']))", "resfilename = self.namestr + \"_findperiodicity_results.dat\" ## open the output log file resfile =", "fitpars[\"bindict\"][\"bmaxfreq\" + str(b)]) + ' is 2I/S = ' + str(fitpars['bindict'][\"bmax\" + str(b)]))", "{\"fitpars\": fitpars, 'bindict': bindict, 'maxpows_all': maxpows_all, 'mcobs': mcobs, 'p_maxpow': [sim_maxpow, p_maxpow, pmaxpow_err], 'maxpow_ul':", "= simno + 1 sim_psfit = mle.PerMaxLike(x, fitmethod='constbfgs', obs=False) slrt, soptpars, sqpopars =", "mcobs.simulate_periodogram(nsim=nsim) ### empty lists for simulated quantities of interest: sim_lrt, sim_deviance, sim_ksp, sim_maxpow,", "+ str(ninetyfiveperlim)) bmaxpow_ul = sim_bmaxpow_sort[ninetyfiveperlim] bindict['bmax' + str(b) + '_ul'] = bmaxpow_ul resfile('The", "sim_srat if x > fitpars1['sobs']])) / float(len(sim_srat)) resfile('simulated srat: ' + str(sim_srat)) resfile('observed", "the residuals and its frequency. 
Sample the posterior distribution of parameters for func", "\"_summary.dat\", \"w\") file.write(\"The ensemble acceptance rate is \" + str(postpars[\"acceptance\"]) + \" .\\n\")", "from src.SpectralAnalysis import mcmc from src.SpectralAnalysis import mle from src.SpectralAnalysis import posterior ##########################################", "= np.msort(sim_s5max) sim_s11max_sort = np.msort(sim_s11max) ### note: this is the limit for 2*I/S", "\" +/- \" + str(probs[x][1]) + \"\\n\") file.write( \"Upper limit for highest [unsmoothed]", "likelihood ratio tests - find periodicities by picking out the largest power in", "= np.sqrt(p_deviance * (1.0 - p_deviance) / float(len(sim_ksp))) pksp_err = np.sqrt(p_ksp * (1.0", "resfile(\"Bayesian p-value for the np.sum of residuals: \" + str(p_srat) + \" +/-", "str(fitpars['popt'])) fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False) # print('popt' + str(i) + 'b :", "/ 2 + 1)) plt.subplots_adjust(top=0.95, bottom=0.05, left=0.05, right=0.95, wspace=0.2, hspace=0.2) for i in", "set parameter names for plotting noise: int, optional, default -1 The index for", "test: \" + str(p_ksp) + \" +/- \" + str(pksp_err)) resfile(\"Bayesian p-value for", "\" + str(obslrt)) # print(\"mean(sim_lrt) = \" + str(np.mean(sim_lrt))) # print(\"Deviance(obs) = \"", "parameter should be last! par : {list, array-like} Input guesses for the parameters", "+ str( probs[\"fitpars\"][\"s3maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"s3max\"])) print(\"Bayesian p-value for", "be 0.05*len(sims) fiveperlim = int(0.05 * len(sim_maxpow)) if fiveperlim == 0: resfile('Warning! Too", "for b in bins[:nbins]: binps = fitpars['bindict']['bin' + str(b)] bmaxpow = np.array([x[\"bmax\" +", "to use in MCMC. For Metropolis-Hastings, use ~10-20 and many samples For emcee,", "sim_s5max.append(sim_pars['s5max']) sim_s11max.append(sim_pars['s11max']) except KeyboardInterrupt: break # except: # print(\"Simulation failed! 
Continuing ...\") #", "x > fitpars['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for x in sim_merit if", "func1 : function Parametric model for the periodogram. Needs to be a function", "this is n-trial corrected! # print('len(bmaxpow_sort) : ' + str(len(sim_bmaxpow_sort))) resfile('ninetyfiveperlim: ' +", "statistic is 2I/S = ' + str(bmaxpow_ul)) ### now turn upper limit into", "last! par2 : {list, array-like} Input guesses for the MAP fit using func2.", "str(p_merit) + \" +/- \" + str(pmerit_err)) print(\"Bayesian p-value for the np.sum of", "namestr = self.namestr try: keys = summary.keys() except AttributeError: raise Exception(\"Summary must be", "resfile(\"Srat(obs) = \" + str(fitpars1['sobs'])) resfile(\"mean(sim_srat) = \" + str(np.mean(sim_srat))) ### Step 6:", "by the MAP model; for a perfect data-model fit, the resulting residuals should", "+ str(bintemplate[0])) ## then compute upper limits for powers I_j depending on frequency", "this changes the statistical distributions. Set m to the number of periodograms averaged", "self.namestr + \"_findperiodicity_results.dat\" ## open the output log file resfile = utils.TwoPrint(resfilename) ###", "\"\"\" Find periodicities in observed data and compute significance via MCMCs. First, fit", "+ background model to *every* frequency. NOTE: I rarely ever use this because", "if x > optpars['merit']])) / float(len(sim_merit)) p_lrt = float(len([x for x in sim_lrt", "self.ps.df * b) + 'Hz is p = ' + str(p_bmaxpow) + '", "# print(\"mean(sim_deviance) = \" + str(np.mean(sim_deviance))) print(\"KSP(obs) = \" + str(optpars['ksp'])) print(\"mean(sim_ksp) =", "Bayes: Bayesian data analysis for time series # # This class defines a", "QPOs namestr: string, optional, default \"test\" The string that will be used to", "str(fitpars['popt'])) ## which posterior do I need to use? 
if self.m == 1:", "\\t\\t sd \\t\\t 5% \\t\\t 95% \\n\") file.write(\"---------------------------------------------\\n\") for i in range(len(postpars['postmean'])): file.write(\"theta[\"", "x in enumerate(postpars[\"rhat\"]): file.write(\"The $R_hat$ value for Parameter \" + str(i) + \"", "default None Include a list of strings here to set parameter names for", "max(25, fitpars['maxpow'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['maxpow'], 0.0, max(n), lw=2, color='navy')", "= ps self.namestr = namestr self.plot = plot self.m = m def choose_noise_model(self,", "QPO + background model to *every* frequency. NOTE: I rarely ever use this", "str(summary['s3max_ul']) + \"\\n\") elif x == 'p_s5max': file.write(\"Bayesian p-value for the highest [5", "str(i) + 'c : ' + str(fitpars['popt'])) sim_pars_all.append(sim_pars) sim_deviance.append(sim_pars['deviance']) sim_ksp.append(sim_pars['ksp']) sim_maxpow.append(sim_pars['maxpow']) sim_merit.append(sim_pars['merit']) sim_fpeak.append(sim_pars['maxfreq'])", "' + str(len(sim_bmaxpow_sort))) resfile('ninetyfiveperlim: ' + str(ninetyfiveperlim)) bmaxpow_ul = sim_bmaxpow_sort[ninetyfiveperlim] bindict['bmax' + str(b)", "emcee, use as many as you can afford (~500) and fewer samples niter", "psrat_err = np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp))) ps3max_err = np.sqrt(p_s3max *", "xmax = min(min(bins), fitpars['s11max']) / 1.2, max(25, fitpars['s3max'] * 1.2) plt.axis([xmin, xmax, 0.0,", "[], [], [], [], [], [], [] ### Step 4: Fit fake periodograms", "ratios such that it is possible to build up a posterior distribution for", "the resulting residuals should follow a chi-square distribution with two degrees of freedom.", "1: lpost = posterior.PerPosterior(self.ps, func1) else: lpost = posterior.StackPerPosterior(self.ps, func1, self.m) ### Step", "+ \"\\n\") file.write( \"Upper limit for highest [unsmoothed] data/model outlier: \" + str(summary['s5max_ul'])", "### get out best fit parameters and 
associated quantities fitpars1 = getattr(psfit, func1name", "on maximum signal power P_max_ul = ' + str(s11max_ul)) resfile(\"Bayesian p-value for deviance", "Bayes object that can: - pick between two models using likelihood ratio tests", "probs[\"fitpars\"][\"maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"maxpow\"])) print(\"Bayesian p-value for the highest", "pickle.dump(summary, picklefile) picklefile.close() file = open(namestr + \"_summary.dat\", \"w\") file.write(\"The ensemble acceptance rate", "psfit, fakeper, mcobs sim_pars1 = getattr(fitfake, func1name + 'fit') sim_pars2 = getattr(fitfake, func2name", "For each fake periodogram, find the MAP estimate, divide out the MAP model", "the MCMC step. Used only in Metropolis-Hastings. parname : list, optional, default None", "'fit') sim_pars2 = getattr(fitfake, func2name + 'fit') # if lrt > 20: #", "\" + str(fitpars1['merit'])) resfile(\"mean(sim_merit) = \" + str(np.mean(sim_merit))) resfile(\"Srat(obs) = \" + str(fitpars1['sobs']))", "/ float(len(sim_deviance)) p_ksp = float(len([x for x in sim_ksp if x > optpars['ksp']]))", "highest [5 bin smoothed] data/model outlier: \" + str( probs[x][0]) + \" +/-", "- p_deviance) / float(len(sim_ksp))) pksp_err = np.sqrt(p_ksp * (1.0 - p_ksp) / float(len(sim_ksp)))", "nchain=10, niter=5000, nsim=1000, covfactor=1.0, parname=None, plotstr=None, use_emcee=True): \"\"\" Find QPOs by fitting a", "mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost, topt=fitpars1['popt'], tcov=fitpars1['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr,", "func and compute the maximum-a-posteriori (MAP) estimate. 
Divide the data by the MAP", "fitpars['s11max']) / 1.2, max(25, fitpars['s3max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s11max'], 0.0,", "rms = ' + str(brms)) bindict['bin' + str(b) + '_ul_%.4fHz' % bc] =", "print posterior summary of parameters: file.write(\"-- Posterior Summary of Parameters: \\n\") file.write(\"parameter \\t", "plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4, color='navy') plt.savefig(self.namestr + '_lrt.png', format='png') plt.close() summary", "parameters in sim_lrt, sim_optpars, sim_qpopars, sim_deviance, sim_ksp, sim_merit, sim_srat = [], [], [],", "' + str(bpow * (self.ps.df * b * self.ps.nphots))) resfile('The upper limit on", "simulations to use when computing the posterior distribution of the likelihood ratio. Note", "+ \"_summary.dat\", \"w\") file.write(\"The ensemble acceptance rate is \" + str(postpars[\"acceptance\"]) + \"", "str(summary['s5max_ul']) + \"\\n\") elif x == 'p_s11max': file.write(\"Bayesian p-value for the highest [11", "(1.0 - p_srat) / float(len(sim_ksp))) ps3max_err = np.sqrt(p_s3max * (1.0 - p_s3max) /", "= mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost, topt=fitpars1['popt'], tcov=fitpars1['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=use_emcee,", "str(probs[x][1])) elif x == 'p_maxpow': if \"fitpars\" in probs.keys(): print(\"Highest [unsmoothed] data/model outlier", "the pre-defined models, this index is *always* -1. \"\"\" resfilename = self.namestr +", ": {list, array-like} Input guesses for the MAP fit using func1. The number", "func. 
fitmethod : string, optional, default \"bfgs\" Choose the optimization algorithm used when", "covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=True, plot=self.plot, m=self.m) ### find optimum QPO", "lpost = posterior.StackPerPosterior(self.ps, func1, self.m) ### Step 2: Set up Markov Chain Monte", "sim_srat.append(sim_pars1['sobs']) except KeyboardInterrupt: break if len(sim_maxpow) == 0: resfile(\"Analysis of Burst failed! Returning", "+ \"\\n\") ### print posterior summary of parameters: file.write(\"-- Posterior Summary of Parameters:", "plot=True, obs=True, plotname=self.namestr + '_loglikes') ### simulate lots of realizations of the broadband", "data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"s11maxfreq\"]) + \"Hz with power P=\"", "both models to observation and compute LRT psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) obslrt", "+ str(b) + '_ul'] = bmaxpow_ul resfile('The posterior p-value for the maximum residual", "Burst failed! Returning ...\") return False, False, False else: ### Step 5: Compute", "+ str(b)] for x in bindicts]) maxpows_all[\"bin\" + str(b)] = bmaxpow bindict['sim_bmaxpow' +", "str(p_srat) + \" +/- \" + str(psrat_err)) resfile(\"Bayesian p-value for Likelihood Ratio: \"", "mle.py, but the default (bfgs) should be sufficient for most applications. nchain :", "* self.ps.nphots))) resfile('The upper limit on the rms amplitude at ' + str(bc)", "p-value for maximum power P_max = \" + str(p_maxpow) + \" +/- \"", "power P=\" + str(probs[\"fitpars\"][\"s3max\"])) print(\"Bayesian p-value for the highest [3 bin smoothed] data/model", "2, 1) n, bins, patches = plt.hist(sim_maxpow, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax", "periodogram with func and compute the maximum-a-posteriori (MAP) estimate. Divide the data by", "complexity, the less complex should be func1. 
Then sample the posterior distribution for", "p_s5max = float(len([x for x in sim_s5max if x > fitpars['s5max']])) / float(len(sim_s5max))", "* (1.0 - p_s5max) / float(len(sim_ksp))) ps11max_err = np.sqrt(p_s11max * (1.0 - p_s11max)", "many samples For emcee, use as many as you can afford (~500) and", "x > obslrt])) / float(len(sim_lrt)) p_srat = float(len([x for x in sim_srat if", "str(ps5max_err)) # resfile('Upper limit on maximum signal power P_max_ul = ' + str(s5max_ul))", "p-value for the highest [5 bin smoothed] data/model outlier: \" + str( probs[x][0])", "str(probs[x][1]) + \"\\n\") elif x == 'p_merit': file.write( \"Bayesian p-value for Merit function:", "Create a posterior distribution of maximum powers and compute a posterior predictive p-value", "to verify that the chains have mixed. nsim : int, optional, default 1000", "1000.0] ## for 40 Hz: print(searchfreq) for bc in searchfreq: if bc >", "results on screen and make funky plots resfile(\"Bayesian p-value for maximum power P_max", "str(np.mean(sim_ksp))) resfile(\"Merit(obs) = \" + str(fitpars1['merit'])) resfile(\"mean(sim_merit) = \" + str(np.mean(sim_merit))) resfile(\"Srat(obs) =", "* pars[plotkeys[i]][0], 0.8 * n, \"p = \" + str(pars[plotkeys[i]][1])) ax.title(\"Posterior for \"", "to use? 
if self.m == 1: lpost = posterior.PerPosterior(self.ps, func) else: lpost =", "ps3max_err], 'p_s5max': [sim_s5max, p_s5max, ps5max_err], 'p_s11max': [sim_s11max, p_s11max, ps11max_err], 'p_merit': [p_merit, pmerit_err], 'p_srat':", "string that will be used to identify this periodogram when saving output (text", "b) + 'Hz is p = ' + str(p_bmaxpow) + ' +/- '", "func2name = \"model2\" ### step 1: fit both models to observation and compute", "obslrt = psfit.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m) ### get out best", "power P=\" + str(probs[\"fitpars\"][\"s5max\"])) print(\"Bayesian p-value for the highest [5 bin smoothed] data/model", "the data can be explained sufficiently with the simpler model. Parameters ---------- func1", "of parameters: print(\"-- Posterior Summary of Parameters: \\n\") print(\"parameter \\t mean \\t\\t sd", "namestr: namestr = self.namestr try: keys = summary.keys() except AttributeError: raise Exception(\"Summary must", "[], [], [], [] ### Step 4: Fit fake periodograms and read out", "str( probs[x][0]) + \" +/- \" + str(probs[x][1]) + \"\\n\") file.write( \"Upper limit", "Set up Markov Chain Monte Carlo Simulations ### of model 1: mcobs =", "noise1=noise1, noise2=noise2, m=self.m) ### get out best fit parameters and associated quantities fitpars1", "the statistical distributions. Set m to the number of periodograms averaged to be", "sim_s3max, sim_s5max, sim_s11max = [], [], [], [], [], [], [], [], [],", "[1, 3, 5, 7, 10, 15, 20, 30, 50, 70, 100, 200, 300,", "= brms else: continue ### Step 5: Compute Bayesian posterior probabilities of individual", "x in sim_merit if x > optpars['merit']])) / float(len(sim_merit)) p_lrt = float(len([x for", "\"\"\" Fit two models func1 and func2, compute the likelihood ratio at the", "use in MCMC. 
For Metropolis-Hastings, use ~10-20 and many samples For emcee, use", "noise1=-1, noise2=-1, writefile=True): \"\"\" Fit two models func1 and func2, compute the likelihood", "[], [], [], [] simno = 0 ### run QPO search on each", "sim_lrt.append(lrt) sim_deviance.append(sim_pars1['deviance']) sim_ksp.append(sim_pars1['ksp']) sim_maxpow.append(sim_pars1['maxpow']) sim_merit.append(sim_pars1['merit']) sim_fpeak.append(sim_pars1['maxfreq']) sim_y0.append(sim_pars1['mfit'][sim_pars1['maxind']]) sim_srat.append(sim_pars1['sobs']) except KeyboardInterrupt: break if len(sim_maxpow)", "Step 3: create fake periodograms out of MCMCs fakeper = mcobs.simulate_periodogram(nsim=nsim) sim_pars_all, sim_deviance,", "= np.sqrt(p_s5max * (1.0 - p_s5max) / float(len(sim_ksp))) ps11max_err = np.sqrt(p_s11max * (1.0", "== 'p_merit': print(\"Bayesian p-value for Merit function: \" + str(probs[x][0]) + \" +/-", "resfile(\"mean(sim_merit) = \" + str(np.mean(sim_merit))) resfile(\"Srat(obs) = \" + str(fitpars1['sobs'])) resfile(\"mean(sim_srat) = \"", "not already if self.ps.norm == 'leahy': binpowers = binpowers / (self.ps.df * b", "str(p_s11max) + \" +/- \" + str(ps11max_err)) # resfile('Upper limit on maximum signal", "to observation psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) fitpars = psfit.mlest(func, ain, obs=True, noise=-1,", "P_max = \" + str(p_s11max) + \" +/- \" + str(ps11max_err)) # resfile('Upper", "== 'p_lrt': print(\"Bayesian p-value for Likelihood Ratio: \" + str(probs[x][0]) + \" +/-", "= \" + str(optpars['ksp'])) print(\"mean(sim_ksp) = \" + str(np.mean(sim_ksp))) print(\"Merit(obs) = \" +", "[], [] bmax = int(self.ps.freq[-1] / (2.0 * (self.ps.freq[1] - self.ps.freq[0]))) bins =", "+ 'Hz for a binning of ' + str(b) + ' is P", "of maximum powers and compute a posterior predictive p-value of seeing the maximum", "find_qpo(self, func, ain, fitmethod='constbfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, parname=None, plotstr=None, use_emcee=True): 
\"\"\" Find", "noise=-1, m=self.m) # print(\"<< --- len(self.ps beginning): \" + str(len(self.ps.ps))) if self.m ==", "ain, plot=True, obs=True, plotname=self.namestr + '_loglikes') ### simulate lots of realizations of the", "level, and this parameter should be last! par : {list, array-like} Input guesses", "posterior.PerPosterior(self.ps, func) else: lpost = posterior.StackPerPosterior(self.ps, func, self.m) ### Step 2: Set up", "str(p_s3max) + \" +/- \" + str(ps3max_err)) # resfile('Upper limit on maximum signal", "= min(min(bins), fitpars['s11max']) / 1.2, max(25, fitpars['s3max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)])", "keys: if x[:2] == 'p_': probs[x] = summary[x] else: postpars[x] = summary[x] picklefile", "'Hz for a binning of ' + str(b) + ' is P =", "* self.ps.df) resfile('The upper limit on the power at ' + str(bc) +", "* len(sim_maxpow)) if fiveperlim == 0: resfile('Warning! Too few simulations to compute five", "the periodogram used is the result of averaging several individual periodograms (or bins),", "probabilities pmaxpow_err = np.sqrt(p_maxpow * (1.0 - p_maxpow) / float(len(sim_ksp))) pdeviance_err = np.sqrt(p_deviance", "str(pmerit_err)) resfile(\"Bayesian p-value for the np.sum of residuals: \" + str(p_srat) + \"", "= ' + str(maxpow_ul)) resfile(\"Bayesian p-value for maximum power P_max = \" +", "par1, func2, par2, fitmethod='bfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, use_emcee=True, parname=None, noise1=-1, noise2=-1, writefile=True):", "reliably!') fiveperlim = 1 ninetyfiveperlim = len(sim_maxpow) - fiveperlim # print('popt4: ' +", "+ str(fitpars1['merit'])) resfile(\"mean(sim_merit) = \" + str(np.mean(sim_merit))) resfile(\"Srat(obs) = \" + str(fitpars1['sobs'])) resfile(\"mean(sim_srat)", "highest [unsmoothed] data/model outlier: \" + str( probs[x][0]) + \" +/- \" +", "except KeyError: print(\"Module Acor not found. Cannot compute autocorrelation times for the parameters\")", "of different minimization algorithms. 
Default uses BFGS, which is pretty robust for most", "saved to disk m: integer, optional, default 1 If the periodogram used is", "fitmethod : string, optional, default bfgs Allows the choice of different minimization algorithms.", "limit on the rms amplitude at ' + str(bc) + 'Hz for a", "for QPOs via a model selection approach using LRTs # # # TO", "this also sets the maximum precision of the posterior predictive p-value (for 1000", "x > fitpars1['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for x in sim_merit if", "+ str(psrat_err)) if self.plot: plt.subplot(2, 2, 1) n, bins, patches = plt.hist(sim_maxpow, bins=100,", "dictionary!\") probs = dict() postpars = dict() ### sort out p-values and posterior", "Merit function: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1]) + \"\\n\")", "str(len(sim_bmaxpow_sort))) resfile('ninetyfiveperlim: ' + str(ninetyfiveperlim)) bmaxpow_ul = sim_bmaxpow_sort[ninetyfiveperlim] bindict['bmax' + str(b) + '_ul']", "sim_merit, sim_fpeak, sim_y0, sim_srat = [], [], [], [], [], [], [], []", "background level, and this parameter should be last! par : {list, array-like} Input", "emcee package for running MCMC. If False, use Metropolis-Hastings. \"\"\" ## the file", "the maximum residual power for a binning of ' + str( self.ps.df *", "or walkers to use in MCMC. 
For Metropolis-Hastings, use ~10-20 and many samples", "= mcobs.simulate_periodogram(nsim=nsim) ### empty lists for simulated quantities of interest: sim_lrt, sim_deviance, sim_ksp,", "in fitpars[\"bindict\"].keys()] nbins = len(binlist) / 4 sain = copy.copy(fitpars['popt']) # print('popt2: '", "of parameters for x in keys: if x[:2] == 'p_': probs[x] = summary[x]", "with power P=\" + str(probs[\"fitpars\"][\"s11max\"])) print(\"Bayesian p-value for the highest [11 bin smoothed]", "== 'p_deviance': file.write(\"Bayesian p-value for deviance D = \" + str(probs[x][0]) + \"", "out best fit parameters and associated quantities fitpars1 = getattr(psfit, func1name + 'fit')", "optpars['merit']])) / float(len(sim_merit)) p_lrt = float(len([x for x in sim_lrt if x >", "plt.vlines(fitpars['s3max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (3) data', fontsize=12) plt.subplot(2, 2, 3) n,", "1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s5max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (5) data/model", "to the number of periodograms averaged to be sure to use the right", "default True If True (STRONGLY RECOMMENDED), use the emcee package for running MCMC.", "fitpars['maxpow']) / 1.2, max(25, fitpars['maxpow'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['maxpow'], 0.0,", "str(probs[x][0]) + \" +/- \" + str(probs[x][1]) + \"\\n\") elif x == 'p_deviance':", "str(psrat_err)) print(\"Bayesian p-value for Likelihood Ratio: \" + str(p_lrt) + \" +/- \"", "+ str(probs[x][1])) elif x == 'p_deviance': print(\"Bayesian p-value for deviance D = \"", "of fake periodograms # - search for QPOs via a model selection approach", "integer, optional, default 1 If the periodogram used is the result of averaging", "x in enumerate(fakeper): try: fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False) lrt = fitfake.compute_lrt(func1, par1,", "str(np.mean(sim_merit))) resfile(\"Srat(obs) = \" + str(fitpars1['sobs'])) resfile(\"mean(sim_srat) = 
\" + str(np.mean(sim_srat))) ### Step", "+ str(fitpars1['sobs'])) resfile(\"mean(sim_srat) = \" + str(np.mean(sim_srat))) ### Step 6: Compute errors of", "the optimization algorithm used when minimizing the -log-likelihood. Choices are listed in mle.py,", "xmin, xmax = min(min(bins), fitpars['s5max']) / 1.2, max(25, fitpars['s5max'] * 1.2) plt.axis([xmin, xmax,", ":param summary: :param namestr: :return: \"\"\" if not namestr: namestr = self.namestr try:", "of parameters n taken by func2. fitmethod : string, optional, default bfgs Allows", "= [1, 3, 5, 7, 10, 15, 20, 30, 50, 70, 100, 200,", "number of parameters k taken by func. fitmethod : string, optional, default \"bfgs\"", "step 1: fit model to observation psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) fitpars =", "resfile(\"Bayesian p-value for maximum power P_max = \" + str(p_maxpow) + \" +/-", "str(i) + \" is \" + str(x)) ### print posterior summary of parameters:", "picklefile.close() file = open(namestr + \"_summary.dat\", \"w\") file.write(\"The ensemble acceptance rate is \"", "maxpow_ul = sim_maxpow_sort[ninetyfiveperlim] ### Step 6: Compute errors of Bayesian posterior probabilities pmaxpow_err", "x > fitpars['merit']])) / float(len(sim_merit)) p_srat = float(len([x for x in sim_srat if", "### empty lists for simulated quantities of interest: sim_lrt, sim_deviance, sim_ksp, sim_maxpow, sim_merit,", "a chi-square distribution with two degrees of freedom. Find the highest power in", "are listed in mle.py, but the default (bfgs) should be sufficient for most", "P = ' + str(bpow * (self.ps.df * b * self.ps.nphots))) resfile('The upper", "elif x == 'p_ksp': file.write(\"Bayesian p-value for KS test: \" + str(probs[x][0]) +", "fake periodograms. 
Fit each fake periodogram with the same models as the data,", "normed=True, histtype='stepfilled') plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4, color='m') plt.savefig(self.namestr + '_qpolrt.png', format='png')", "power for a binning of ' + str( self.ps.df * b) + 'Hz", "self.namestr + \"_choosenoisemodel.dat\" resfile = utils.TwoPrint(resfilename) ### make strings for function names from", "+ \" +/- \" + str(pksp_err)) print(\"Bayesian p-value for Merit function: \" +", "Acor not found. Cannot compute autocorrelation times for the parameters \\n\") for i,", "higher than this ### note: sometimes simulations fail, therefore the 5% limit should", "the same models as the data, and compute the likelihood ratios such that", "n-trial corrected! # print('len(bmaxpow_sort) : ' + str(len(sim_bmaxpow_sort))) resfile('ninetyfiveperlim: ' + str(ninetyfiveperlim)) bmaxpow_ul", "float(len(sim_ksp)) p_merit = float(len([x for x in sim_merit if x > fitpars['merit']])) /", "bin smoothed] data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"s5maxfreq\"]) + \"Hz with", "max(25, fitpars['s5max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s5max'], 0.0, max(n), lw=2, color='navy')", "and its frequency. Sample the posterior distribution of parameters for func using MCMC,", "the noise parameter in func. In the pre-defined models, this index is *always*", "str(b) + 'err'] = bmaxpow_err sim_bmaxpow_sort = np.msort(bmaxpow) ### note: this is the", "noise parameter in func. In the pre-defined models, this index is *always* -1.", "mean \\t\\t sd \\t\\t 5% \\t\\t 95% \\n\") print(\"---------------------------------------------\\n\") for i in range(len(postpars['postmean'])):", "KeyError: print(\"Module Acor not found. 
Cannot compute autocorrelation times for the parameters\") for", "'maxpow_ul': maxpow_ul, 'p_s3max': [sim_s3max, p_s3max, ps3max_err], 'p_s5max': [sim_s5max, p_s5max, ps5max_err], 'p_s11max': [sim_s11max, p_s11max,", "/ 2.0 - bintemplate ## now compute rms amplitude at 40, 70, 100", "default bfgs Allows the choice of different minimization algorithms. Default uses BFGS, which", "plrt_err], \"p_maxpow\": [p_maxpow, pmaxpow_err], \"p_deviance\": [p_deviance, pdeviance_err], \"p_ksp\": [p_ksp, pksp_err], \"p_merit\": [p_merit, pmerit_err],", "\"\"\" Write a summary of the analysis to file. NOT USED! :param summary:", "sim_deviance, sim_ksp, sim_fpeak, sim_srat, \\ sim_maxpow, sim_merit, sim_y0, sim_s3max, sim_s5max, sim_s11max = [],", "0.0, max(n)]) plt.vlines(fitpars['s11max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (11) data', fontsize=12) plt.savefig(self.namestr +", "and compute the maximum-a-posteriori (MAP) estimate. Divide the data by the MAP model;", "afford (~500) and fewer samples niter : int, optional, default 5000 Sets the", "' + str(fitpars['popt'])) sim_pars = fitfake.mlest(func, sain, obs=False, noise=noise, m=self.m) # print('popt' +", "elif x == 'p_ksp': print(\"Bayesian p-value for KS test: \" + str(probs[x][0]) +", "\" + str( probs[x][0]) + \" +/- \" + str(probs[x][1])) return def write_summary(self,", "p-value for the highest [3 bin smoothed] data/model outlier: \" + str( probs[x][0])", "x in enumerate(postpars[\"rhat\"]): print(\"The $R_hat$ value for Parameter \" + str(i) + \"", "+/- \" + str(pksp_err)) print(\"Bayesian p-value for Merit function: \" + str(p_merit) +", "fig = plt.figure(figsize=(2, N / 2 + 1)) plt.subplots_adjust(top=0.95, bottom=0.05, left=0.05, right=0.95, wspace=0.2,", "- pick between two models using likelihood ratio tests # - find periodicities", "\"_findperiodicity_results.dat\" ## open the output log file resfile = utils.TwoPrint(resfilename) ### step 1:", "Used only in Metropolis-Hastings. 
use_emcee : boolean, optional, default True If True (STRONGLY", "\" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_maxpow':", "noise2=noise2, m=self.m) ### get out best fit parameters and associated quantities fitpars1 =", "fakeper = mcobs.simulate_periodogram(nsim=nsim) sim_pars_all, sim_deviance, sim_ksp, sim_fpeak, sim_srat, \\ sim_maxpow, sim_merit, sim_y0, sim_s3max,", "/ float(len(sim_ksp)) p_merit = float(len([x for x in sim_merit if x > fitpars['merit']]))", "if x > fitpars['s3max']])) / float(len(sim_s3max)) p_s5max = float(len([x for x in sim_s5max", "of parameters k taken by func. fitmethod : string, optional, default \"bfgs\" Choose", "KeyboardInterrupt: break # except: # print(\"Simulation failed! Continuing ...\") # continue # print('popt'", "+ \" +/- \" + str(ps3max_err)) # resfile('Upper limit on maximum signal power", "\" + str(np.mean(sim_deviance))) print(\"KSP(obs) = \" + str(optpars['ksp'])) print(\"mean(sim_ksp) = \" + str(np.mean(sim_ksp)))", "posterior.StackPerPosterior(self.ps, func, self.m) ### Step 2: Set up Markov Chain Monte Carlo Simulations", "Carlo Simulations ### of model 1: mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost, topt=fitpars1['popt'], tcov=fitpars1['cov'],", "func, ain, fitmethod='constbfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, parname=None, plotstr=None, use_emcee=True): \"\"\" Find QPOs", "1000 The number of simulations to use when computing the posterior distribution of", "summary of parameters: print(\"-- Posterior Summary of Parameters: \\n\") print(\"parameter \\t mean \\t\\t", "for deviance D = \" + str(p_deviance) + \" +/- \" + str(pdeviance_err))", "0.01 * pars[plotkeys[i]][0], 0.8 * n, \"p = \" + str(pars[plotkeys[i]][1])) ax.title(\"Posterior for", "if not namestr: namestr = self.namestr try: keys = summary.keys() except AttributeError: raise", "str(summary['s11max_ul']) + \"\\n\") return def plot_posteriors(namestr='test', **pars): plotkeys = pars.keys() N = 
len(plotkeys)", "sim_pars2 = getattr(fitfake, func2name + 'fit') # if lrt > 20: # fitfake.plotfits(sim_pars1,", "this parameter should be last! par1 : {list, array-like} Input guesses for the", "' + str(len(binps.freq))) # print('len(binpowers): ' + str(len(binpowers))) if searchfreq is None: searchfreq", "takes an array of frequencies and n parameters, and returns an array of", "+ \" .\\n\") try: file.write(\"The autocorrelation times are: \" + str(postpars[\"acor\"]) + \"\\n\")", "binpowers = binpowers * self.ps.n ** 2.0 / (self.ps.df * b * self.ps.nphots", "str(s3max_ul)) resfile(\"Bayesian p-value for maximum power P_max = \" + str(p_s5max) + \"", "emcee, this can be smaller, but it's a good idea to verify that", "function names from function definition func1name = \"model1\" func2name = \"model2\" ### step", "False, False else: ### Step 5: Compute Bayesian posterior probabilities of individual quantities", "should be sufficient for most applications. nchain : int, optional, default 10 The", "str(np.mean(sim_deviance))) print(\"KSP(obs) = \" + str(optpars['ksp'])) print(\"mean(sim_ksp) = \" + str(np.mean(sim_ksp))) print(\"Merit(obs) =", "Likelihood Ratio: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x", "if x == 'p_lrt': file.write( \"Bayesian p-value for Likelihood Ratio: \" + str(probs[x][0])", "str(fitpars['popt'])) sim_pars = fitfake.mlest(func, sain, obs=False, noise=noise, m=self.m) # print('popt' + str(i) +", "fit using func1. 
The number of elements *must* equal the number of parameters", "x in bmaxpow if x > fitpars['bindict'][\"bmax\" + str(b)]])) / float( len(bmaxpow)) bindict[\"p_maxpow\"", "on maximum signal power P_max_ul = ' + str(maxpow_ul)) resfile(\"Bayesian p-value for maximum", "+ str(summary['s5max_ul']) + \"\\n\") elif x == 'p_s11max': file.write(\"Bayesian p-value for the highest", "str( probs[\"fitpars\"][\"s3maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"s3max\"])) print(\"Bayesian p-value for the", "maximum signal power P_max_ul = ' + str(s3max_ul)) resfile(\"Bayesian p-value for maximum power", "optional, default \"test\" The string that will be used to identify this periodogram", "= getattr(psfit, func2name + 'fit') if self.plot: ### plot the periodogram and best", "Likelihood Ratio: \" + str(p_lrt) + \" +/- \" + str(plrt_err)) if self.plot:", "algorithm used when minimizing the -log-likelihood. Choices are listed in mle.py, but the", "float(len([x for x in sim_s3max if x > fitpars['s3max']])) / float(len(sim_s3max)) p_s5max =", "\"\\n\") ### print posterior summary of parameters: file.write(\"-- Posterior Summary of Parameters: \\n\")", "posterior predictive p-value of seeing the maximum power in the data under the", "sim_srat.append(soptpars['sobs']) except KeyboardInterrupt: break ### Step 5: Compute Bayesian posterior probabilities of individual", "a perfect data-model fit, the resulting residuals should follow a chi-square distribution with", "parameters k taken by func. 
fitmethod : string, optional, default \"bfgs\" Choose the", "min(min(bins), fitpars['maxpow']) / 1.2, max(25, fitpars['maxpow'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['maxpow'],", "plt.close() summary = {\"p_lrt\": [p_lrt, plrt_err], \"p_deviance\": [p_deviance, pdeviance_err], \"p_ksp\": [p_ksp, pksp_err], \"p_merit\":", "each fake periodogram, find the MAP estimate, divide out the MAP model and", "fitpars, \"postmean\": mcobs.mean, \"posterr\": mcobs.std, \"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance}", "### of model 1: mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost, topt=fitpars1['popt'], tcov=fitpars1['cov'], covfactor=covfactor, niter=niter,", "p-value for Merit function: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1]))", "+/- \" + str(probs[x][1])) elif x == 'p_s3max': if \"fitpars\" in probs.keys(): print(\"Highest", "str( probs[\"fitpars\"][\"maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"maxpow\"])) print(\"Bayesian p-value for the", "[], [], [], [], [], [], [] bmax = int(self.ps.freq[-1] / (2.0 *", "str(summary['maxpow_ul']) + \"\\n\") elif x == 'p_s3max': file.write(\"Bayesian p-value for the highest [3", "elements *must* equal the number of parameters n taken by func2. 
fitmethod :", "print(\"The autocorrelation times are: \" + str(postpars[\"acor\"])) except KeyError: print(\"Module Acor not found.", "at frequency F=\" + str( probs[\"fitpars\"][\"maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"maxpow\"]))", "print(\"parameter \\t mean \\t\\t sd \\t\\t 5% \\t\\t 95% \\n\") print(\"---------------------------------------------\\n\") for i", "for i, x in enumerate(postpars[\"rhat\"]): file.write(\"The $R_hat$ value for Parameter \" + str(i)", "create fake periodograms out of MCMCs fakeper = mcobs.simulate_periodogram(nsim=nsim) sim_pars_all, sim_deviance, sim_ksp, sim_fpeak,", "p = ' + str(p_bmaxpow) + ' +/- ' + str(bmaxpow_err)) resfile('The corresponding", "float(len([x for x in sim_srat if x > fitpars1['sobs']])) / float(len(sim_srat)) resfile('simulated srat:", "\" + str(p_maxpow) + \" +/- \" + str(pmaxpow_err)) resfile(\"Bayesian p-value for deviance", "int, optional, default 10 The number of chains or walkers to use in", "guesses for the parameters taken by func. The number of elements in this", "ratios parameters for each for x in funcfake: try: simno = simno +", "Like everything else, this is n-trial corrected! 
# print('len(bmaxpow_sort) : ' + str(len(sim_bmaxpow_sort)))", "2 + 1, 2, i) n, bins, patches = ax.hist(pars[plotkeys[i]][0], 30) ax.vlines(pars[plotkeys[i]][0], 0.0,", "Cannot compute autocorrelation times for the parameters\") for i, x in enumerate(postpars[\"rhat\"]): print(\"The", "'p_s5max': [sim_s5max, p_s5max, ps5max_err], 'p_s11max': [sim_s11max, p_s11max, ps11max_err], 'p_merit': [p_merit, pmerit_err], 'p_srat': [p_srat,", "via a model selection approach using LRTs Parameters ---------- ps : powerspectrum.Powerspectrum A", "= [r for r in fitpars[\"bindict\"].keys()] nbins = len(binlist) / 4 sain =", "posterior ########################################## # # class Bayes: Bayesian data analysis for time series #", "the sum of residuals: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1]))", "+ str(fitpars1['sobs'])) resfile(\"p(LRT) = \" + str(p_lrt)) resfile(\"KSP(obs) = \" + str(fitpars1['ksp'])) resfile(\"mean(sim_ksp)", "for x in sim_ksp if x > optpars['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x", "x == 'p_deviance': file.write(\"Bayesian p-value for deviance D = \" + str(probs[x][0]) +", "[3 bin smoothed] data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"s3maxfreq\"]) + \"Hz", "np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp))) ### Display results on screen and", "elif x == 'p_merit': file.write( \"Bayesian p-value for Merit function: \" + str(probs[x][0])", ": int, optional, default 5000 Sets the length of the Markov chains. For", "/ float(len(sim_ksp))) ps3max_err = np.sqrt(p_s3max * (1.0 - p_s3max) / float(len(sim_ksp))) ps5max_err =", "a binning of ' + str(b) + ' is P = ' +", "deviance D = \" + str(p_deviance) + \" +/- \" + str(pdeviance_err)) print(\"Bayesian", "the MAP model and find the highest power in that periodogram. 
Create a", "fitpars1 = getattr(psfit, func1name + 'fit') fitpars2 = getattr(psfit, func2name + 'fit') if", "\" +/- \" + str(probs[x][1])) elif x == 'p_deviance': print(\"Bayesian p-value for deviance", "verify that the chains have mixed. nsim : int, optional, default 1000 The", "is n-trial corrected! # print('len(bmaxpow_sort) : ' + str(len(sim_bmaxpow_sort))) resfile('ninetyfiveperlim: ' + str(ninetyfiveperlim))", "func1) else: lpost = posterior.StackPerPosterior(self.ps, func1, self.m) ### Step 2: Set up Markov", "x in sim_ksp if x > fitpars1['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for", "niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=True, plot=self.plot, m=self.m) ### find optimum QPO values", "str(np.mean(sim_ksp))) print(\"Merit(obs) = \" + str(optpars['merit'])) print(\"mean(sim_merit) = \" + str(np.mean(sim_merit))) print(\"Srat(obs) =", "\" +/- \" + str(probs[x][1]) + \"\\n\") elif x == 'p_deviance': file.write(\"Bayesian p-value", "== 'p_merit': file.write( \"Bayesian p-value for Merit function: \" + str(probs[x][0]) + \"", "histtype='stepfilled') plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4, color='m') plt.savefig(self.namestr + '_qpolrt.png', format='png') plt.close()", "str(len(binpowers))) if searchfreq is None: searchfreq = [40.0, 70.0, 100.0, 300.0, 500.0, 1000.0]", "of Bayesian posterior probabilities pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance) / float(len(sim_ksp)))", "of strings here to set parameter names for plotting noise1, noise2 : int,", "= binpowers[bind] brms = np.sqrt(bpow * b * self.ps.df) resfile('The upper limit on", "data', fontsize=12) plt.subplot(2, 2, 3) n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\",", "fontsize=12) plt.savefig(self.namestr + '_maxpow.png', format='png') plt.close() results = {\"fitpars\": fitpars, 'bindict': bindict, 'maxpows_all':", "statistic at frequency f = ' + str( fitpars[\"bindict\"][\"bmaxfreq\" + 
str(b)]) + '", "max(n), lw=2, color='navy') plt.title('smoothed (3) data', fontsize=12) plt.subplot(2, 2, 3) n, bins, patches", "== 'p_ksp': file.write(\"Bayesian p-value for KS test: \" + str(probs[x][0]) + \" +/-", "note: sometimes simulations fail, therefore the 5% limit should be 0.05*len(sims) fiveperlim =", "/ float(len(sim_s11max)) ### sort maximum powers from lowest to highest sim_maxpow_sort = np.msort(sim_maxpow)", "str(ps3max_err)) # resfile('Upper limit on maximum signal power P_max_ul = ' + str(s3max_ul))", "KS test: \" + str(p_ksp) + \" +/- \" + str(pksp_err)) resfile(\"Bayesian p-value", "model selection approach using LRTs # # # TO DO: Need to add", "sort maximum powers from lowest to highest sim_maxpow_sort = np.msort(sim_maxpow) sim_s3max_sort = np.msort(sim_s3max)", "[p_merit, pmerit_err], 'p_srat': [p_srat, psrat_err], 'p_deviance': [p_deviance, pdeviance_err], 'fitpars': fitpars, \"postmean\": mcobs.mean, \"posterr\":", "str(pdeviance_err)) print(\"Bayesian p-value for KS test: \" + str(p_ksp) + \" +/- \"", "0.8 * max(n), lw=4) ax.figtext(pars[plotkeys[i]][0] + 0.01 * pars[plotkeys[i]][0], 0.8 * n, \"p", "mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) obslrt = psfit.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m) ###", "the -log-likelihood. Choices are listed in mle.py, but the default (bfgs) should be", "simno + 1 sim_psfit = mle.PerMaxLike(x, fitmethod='constbfgs', obs=False) slrt, soptpars, sqpopars = sim_psfit.find_qpo(func,", "file.write(\"The ensemble acceptance rate is \" + str(postpars[\"acceptance\"]) + \" .\\n\") try: file.write(\"The", "+/- \" + str(probs[x][1]) + \"\\n\") elif x == 'p_merit': file.write( \"Bayesian p-value", "\" + str(pmerit_err)) print(\"Bayesian p-value for the np.sum of residuals: \" + str(p_srat)", "array-like} Input guesses for the parameters taken by func. 
The number of elements", "' + str(b) + ' is P = ' + str(bpow * (self.ps.df", "float(len(sim_ksp)) p_merit = float(len([x for x in sim_merit if x > fitpars1['merit']])) /", "string, optional, default \"bfgs\" Choose the optimization algorithm used when minimizing the -log-likelihood.", "+ str(pmerit_err)) resfile(\"Bayesian p-value for the np.sum of residuals: \" + str(p_srat) +", "True, several diagnostic plots will be saved to disk m: integer, optional, default", "+ str(p_bmaxpow) + ' +/- ' + str(bmaxpow_err)) resfile('The corresponding value of the", "str(probs[x][0]) + \" +/- \" + str(probs[x][1]) + \"\\n\") elif x == 'p_merit':", "print(\"Deviance(obs) = \" + str(fitpars1['deviance'])) # print(\"mean(sim_deviance) = \" + str(np.mean(sim_deviance))) print(\"KSP(obs) =", "+ str(b)] = p_bmaxpow bmaxpow_err = np.sqrt(p_bmaxpow * (1.0 - p_bmaxpow) / float(len(bmaxpow)))", "frequency f = ' + str( fitpars[\"bindict\"][\"bmaxfreq\" + str(b)]) + ' is 2I/S", "file.write(\"Bayesian p-value for the sum of residuals: \" + str(probs[x][0]) + \" +/-", "color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['maxpow']) / 1.2, max(25, fitpars['maxpow'] * 1.2)", "50, 70, 100, 200, 300, 500, 700, 1000] binlist = [r for r", "compute upper limits for powers I_j depending on frequency binpowers = bmaxpow_ul *", "+ \" +/- \" + str(probs[x][1]) + \"\\n\") elif x == 'p_deviance': file.write(\"Bayesian", "\" + str(np.mean(sim_merit))) resfile(\"Srat(obs) = \" + str(fitpars1['sobs'])) resfile(\"mean(sim_srat) = \" + str(np.mean(sim_srat)))", "\"\\n\") elif x == 'p_merit': file.write( \"Bayesian p-value for Merit function: \" +", "file.write( \"Upper limit for highest [unsmoothed] data/model outlier: \" + str(summary['s3max_ul']) + \"\\n\")", "Metropolis-Hastings. 
\"\"\" ## the file name where the output will be stored resfilename", "print(\"mean(sim_deviance) = \" + str(np.mean(sim_deviance))) print(\"KSP(obs) = \" + str(optpars['ksp'])) print(\"mean(sim_ksp) = \"", "= len(sim_maxpow) - fiveperlim # print('popt4: ' + str(fitpars['popt'])) bindicts = [x[\"bindict\"] for", "+ 'b : ' + str(fitpars['popt'])) sim_pars = fitfake.mlest(func, sain, obs=False, noise=noise, m=self.m)", "string, optional, default \"test\" The string that will be used to identify this", "color='navy') plt.title('smoothed (3) data', fontsize=12) plt.subplot(2, 2, 3) n, bins, patches = plt.hist(sim_s3max,", "p_s5max, ps5max_err], 'p_s11max': [sim_s11max, p_s11max, ps11max_err], 'p_merit': [p_merit, pmerit_err], 'p_srat': [p_srat, psrat_err], 'p_deviance':", "by fitting a QPO + background model to *every* frequency. NOTE: I rarely", "postpars['posterr'][i]) + \"\\t\" + str(postpars['postquantiles'][i][0]) + \"\\t\" + str( postpars[\"postquantiles\"][i][1]) + \"\\n\") for", "parameters: print(\"-- Posterior Summary of Parameters: \\n\") print(\"parameter \\t mean \\t\\t sd \\t\\t", "distribution for the likelihood ratios and compute a posterior predictive p-value that the", "+ str(fitpars['popt'])) sim_pars_all.append(sim_pars) sim_deviance.append(sim_pars['deviance']) sim_ksp.append(sim_pars['ksp']) sim_maxpow.append(sim_pars['maxpow']) sim_merit.append(sim_pars['merit']) sim_fpeak.append(sim_pars['maxfreq']) sim_y0.append(sim_pars['mfit'][sim_pars['maxind']]) sim_srat.append(sim_pars['sobs']) sim_s3max.append(sim_pars['s3max']) sim_s5max.append(sim_pars['s5max']) sim_s11max.append(sim_pars['s11max'])", "n parameters, and returns an array of model powers The function should include", "and make funky plots print(\"Bayesian p-value for deviance D = \" + str(p_deviance)", "import numpy as np from src.SpectralAnalysis import utils from src.SpectralAnalysis import powerspectrum from", "> fitpars['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x 
for x in sim_ksp if x", "of interest: sim_lrt, sim_deviance, sim_ksp, sim_maxpow, sim_merit, sim_fpeak, sim_y0, sim_srat = [], [],", "parname : list, optional, default None Include a list of strings here to", "str(fitpars['popt'])) sim_pars_all.append(sim_pars) sim_deviance.append(sim_pars['deviance']) sim_ksp.append(sim_pars['ksp']) sim_maxpow.append(sim_pars['maxpow']) sim_merit.append(sim_pars['merit']) sim_fpeak.append(sim_pars['maxfreq']) sim_y0.append(sim_pars['mfit'][sim_pars['maxind']]) sim_srat.append(sim_pars['sobs']) sim_s3max.append(sim_pars['s3max']) sim_s5max.append(sim_pars['s5max']) sim_s11max.append(sim_pars['s11max']) except", "src.SpectralAnalysis import powerspectrum from src.SpectralAnalysis import mcmc from src.SpectralAnalysis import mle from src.SpectralAnalysis", "power P=\" + str(probs[\"fitpars\"][\"maxpow\"])) print(\"Bayesian p-value for the highest [unsmoothed] data/model outlier: \"", "power in the residuals and its frequency. Sample the posterior distribution of parameters", "distribution with two degrees of freedom. Find the highest power in the residuals", "autocorrelation times are: \" + str(postpars[\"acor\"])) except KeyError: print(\"Module Acor not found. Cannot", "func, par, fitmethod='bfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, parname=None, noise=-1, use_emcee=True, searchfreq=None): \"\"\" Find", "len(sim_maxpow) - fiveperlim # print('popt4: ' + str(fitpars['popt'])) bindicts = [x[\"bindict\"] for x", "last! par : {list, array-like} Input guesses for the parameters taken by func.", "maximum power in the data under the null hypothesis (no QPO). Parameters ----------", "(no QPO). Parameters ---------- func : function Parametric model for the periodogram. Needs", "p_maxpow would be 0.05 ### i.e. 
when only 0.05*nsim simulations are higher than", "color='m') plt.savefig(self.namestr + '_qpolrt.png', format='png') plt.close() summary = {\"p_lrt\": [p_lrt, plrt_err], \"p_deviance\": [p_deviance,", "sim_merit.append(soptpars['merit']) sim_srat.append(soptpars['sobs']) except KeyboardInterrupt: break ### Step 5: Compute Bayesian posterior probabilities of", "float(len(sim_ksp))) plrt_err = np.sqrt(p_lrt * (1.0 - p_lrt) / float(len(sim_ksp))) psrat_err = np.sqrt(p_srat", "p_merit) / float(len(sim_ksp))) psrat_err = np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp))) ps3max_err", "p-value of seeing the maximum power in the data under the null hypothesis", "beginning): \" + str(len(self.ps.ps))) if self.m == 1: lpost = posterior.PerPosterior(self.ps, func) else:", "models to observation and compute LRT psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) obslrt =", "### sort maximum powers from lowest to highest sim_maxpow_sort = np.msort(sim_maxpow) sim_s3max_sort =", "x in funcfake: try: simno = simno + 1 sim_psfit = mle.PerMaxLike(x, fitmethod='constbfgs',", "for a binning of ' + str(b) + ' is rms = '", "+ str(simno) + '_qposearch') sim_lrt.append(slrt) sim_optpars.append(soptpars) sim_qpopars.append(sqpopars) sim_deviance.append(soptpars['deviance']) sim_ksp.append(soptpars['ksp']) sim_merit.append(soptpars['merit']) sim_srat.append(soptpars['sobs']) except KeyboardInterrupt:", "if self.plot: n, bins, patches = plt.hist(sim_lrt, bins=100, normed=True, histtype='stepfilled') plt.vlines(obslrt, 0.0, 0.8", "histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s5max']) / 1.2, max(25, fitpars['s5max'] * 1.2) plt.axis([xmin,", "function definition func1name = \"model1\" func2name = \"model2\" ### step 1: fit both", "def write_summary(self, summary, namestr=None): \"\"\" Write a summary of the analysis to file.", "# This class defines a Bayes object that can: # - pick between", "== 'p_srat': file.write(\"Bayesian p-value for the sum of residuals: 
\" + str(probs[x][0]) +", "+ str(p_s3max) + \" +/- \" + str(ps3max_err)) # resfile('Upper limit on maximum", "6: Compute errors of Bayesian posterior probabilities pdeviance_err = np.sqrt(p_deviance * (1.0 -", "== 0: resfile(\"Analysis of Burst failed! Returning ...\") return False, False, False else:", "p_ksp = float(len([x for x in sim_ksp if x > optpars['ksp']])) / float(len(sim_ksp))", "the number of parameters k taken by func1. func2 : function Parametric model", "(>10000) For emcee, this can be smaller, but it's a good idea to", "in that periodogram. Create a posterior distribution of maximum powers and compute a", "rarely ever use this because it's really computationally expensive. Parameters ---------- func :", "/ float(len(sim_ksp)) p_merit = float(len([x for x in sim_merit if x > fitpars1['merit']]))", "data/model outlier: \" + str( probs[x][0]) + \" +/- \" + str(probs[x][1]) +", "periodogram and best fit models psfit.plotfits(fitpars1, fitpars2, namestr=self.namestr, log=True) if self.m == 1:", "(MAP) estimate. 
Divide the data by the MAP model; for a perfect data-model", "T_R statistic at frequency f = ' + str( fitpars[\"bindict\"][\"bmaxfreq\" + str(b)]) +", "is None: searchfreq = [40.0, 70.0, 100.0, 300.0, 500.0, 1000.0] ## for 40", "### Display results on screen and make funky plots print(\"Bayesian p-value for deviance", "quantities p_maxpow = float(len([x for x in sim_maxpow if x > fitpars1['maxpow']])) /", "sim_ksp if x > optpars['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for x in", "\"] \\t \" + str(postpars['postmean'][i]) + \"\\t\" + str( postpars['posterr'][i]) + \"\\t\" +", "print(\"Bayesian p-value for the sum of residuals: \" + str(probs[x][0]) + \" +/-", "+ \" +/- \" + str(probs[x][1])) return def write_summary(self, summary, namestr=None): \"\"\" Write", "maximum signal power P_max_ul = ' + str(s5max_ul)) resfile(\"Bayesian p-value for maximum power", "ps, namestr='test', plot=True, m=1): assert isinstance(ps, powerspectrum.PowerSpectrum), \"ps must be of type powerspectrum.PowerSpectrum!\"", "in Metropolis-Hastings. parname : list, optional, default None Include a list of strings", ": int, optional, default -1 The index for the noise parameter in func1", "build up a posterior distribution for the likelihood ratios and compute a posterior", "= sim_psfit.find_qpo(func, ain, obs=False, plot=True, plotname=plotstr + '_sim' + str(simno) + '_qposearch') sim_lrt.append(slrt)", "x in sim_srat if x > fitpars['sobs']])) / float(len(sim_srat)) p_s3max = float(len([x for", "elif self.ps.norm == 'variance': binpowers = binpowers * self.ps.n ** 2.0 / (self.ps.df", "+ str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_maxpow': if", "in probs.keys(): print(\"Highest [11 bin smoothed] data/model outlier at frequency F=\" + str(", "brms = np.sqrt(bpow * b * self.ps.df) resfile('The upper limit on the power", "\"bfgs\" Choose the optimization algorithm used when minimizing the -log-likelihood. 
Choices are listed", "sim_maxpow if x > fitpars['maxpow']])) / float(len(sim_maxpow)) p_deviance = float(len([x for x in", "output log file resfile = utils.TwoPrint(resfilename) ### step 1: fit model to observation", "+ str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_ksp': print(\"Bayesian", "'p_ksp': file.write(\"Bayesian p-value for KS test: \" + str(probs[x][0]) + \" +/- \"", "bmaxpow_err sim_bmaxpow_sort = np.msort(bmaxpow) ### note: this is the limit for 2*I/S -->", "[sim_s5max, p_s5max, ps5max_err], 'p_s11max': [sim_s11max, p_s11max, ps11max_err], 'p_merit': [p_merit, pmerit_err], 'p_srat': [p_srat, psrat_err],", "smoothed] data/model outlier: \" + str( probs[x][0]) + \" +/- \" + str(probs[x][1])", "compute rms amplitude at 40, 70, 100 and 300 Hz ## first, convert", "in sim_merit if x > fitpars['merit']])) / float(len(sim_merit)) p_srat = float(len([x for x", "noise2=-1, writefile=True): \"\"\" Fit two models func1 and func2, compute the likelihood ratio", "parameters \\n\") for i, x in enumerate(postpars[\"rhat\"]): file.write(\"The $R_hat$ value for Parameter \"", "'variance': binpowers = binpowers * self.ps.n ** 2.0 / (self.ps.df * b *", "return def plot_posteriors(namestr='test', **pars): plotkeys = pars.keys() N = len(plotkeys) ### number of", "% bc] = brms else: continue ### Step 5: Compute Bayesian posterior probabilities", "\"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return summary def print_summary(self, summary):", "idea to verify that the chains have mixed. 
nsim : int, optional, default", "histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s3max']) / 1.2, max(25, fitpars['s3max'] * 1.2) plt.axis([xmin,", "x in sim_merit if x > fitpars1['merit']])) / float(len(sim_merit)) p_lrt = float(len([x for", "observation psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) fitpars = psfit.mlest(func, ain, obs=True, noise=-1, m=self.m)", "str(postpars[\"acceptance\"]) + \" .\") try: print(\"The autocorrelation times are: \" + str(postpars[\"acor\"])) except", "binpowers * self.ps.n ** 2.0 / (self.ps.df * b * self.ps.nphots ** 2.0)", "parameters, and returns an array of model powers. The function should include a", "p_srat = float(len([x for x in sim_srat if x > optpars['sobs']])) / float(len(sim_srat))", "[11 bin smoothed] data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"s11maxfreq\"]) + \"Hz", "resfile = utils.TwoPrint(resfilename) ### step 1: fit model to observation psfit = mle.PerMaxLike(self.ps,", "\"posterr\": mcobs.std, \"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return results def", "deviance D = \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif", "plots) plot: boolean, optional, default True If True, several diagnostic plots will be", "summary.keys() except AttributeError: raise Exception(\"Summary must be a dictionary!\") probs = dict() postpars", "---------- func : function Parametric model for the periodogram. 
Needs to be a", "fitpars['s3max']) / 1.2, max(25, fitpars['s3max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s3max'], 0.0,", "summary[x] picklefile = open(namestr + \"_summary_pickle.dat\", \"w\") pickle.dump(summary, picklefile) picklefile.close() file = open(namestr", "into an rms amplitude: ## first compute broadband noise model for binned frequencies", "plotstr = self.namestr funcname = str(func).split()[1] # print(\"<< --- len(self.ps beginning): \" +", "data/model outlier: \" + str( probs[x][0]) + \" +/- \" + str(probs[x][1])) return", "the Markov chains. For Metropolis-Hastings, this needs to be large (>10000) For emcee,", "[p_lrt, plrt_err], \"p_maxpow\": [p_maxpow, pmaxpow_err], \"p_deviance\": [p_deviance, pdeviance_err], \"p_ksp\": [p_ksp, pksp_err], \"p_merit\": [p_merit,", "data/model outlier: \" + str(summary['maxpow_ul']) + \"\\n\") elif x == 'p_s3max': file.write(\"Bayesian p-value", "where p_maxpow would be 0.05 ### i.e. when only 0.05*nsim simulations are higher", "parameters of interest from each fit: for i, x in enumerate(fakeper): try: fitfake", "= \" + str(np.mean(sim_ksp))) resfile(\"Merit(obs) = \" + str(fitpars1['merit'])) resfile(\"mean(sim_merit) = \" +", "associated quantities fitpars1 = getattr(psfit, func1name + 'fit') fitpars2 = getattr(psfit, func2name +", "str( probs[x][0]) + \" +/- \" + str(probs[x][1])) return def write_summary(self, summary, namestr=None):", "normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s11max']) / 1.2, max(25, fitpars['s3max'] *", "sim_maxpow if x > fitpars1['maxpow']])) / float(len(sim_maxpow)) p_deviance = float(len([x for x in", "700, 1000] binlist = [r for r in fitpars[\"bindict\"].keys()] nbins = len(binlist) /", "color='navy') plt.title('smoothed (11) data', fontsize=12) plt.savefig(self.namestr + '_maxpow.png', format='png') plt.close() results = {\"fitpars\":", "for the likelihood ratios and compute a posterior predictive p-value that the 
data", "plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s3max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (3) data', fontsize=12)", "F=\" + str( probs[\"fitpars\"][\"s3maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"s3max\"])) print(\"Bayesian p-value", "[p_maxpow, pmaxpow_err], \"p_deviance\": [p_deviance, pdeviance_err], \"p_ksp\": [p_ksp, pksp_err], \"p_merit\": [p_merit, pmerit_err], \"p_srat\": [p_srat,", "topt=fitpars['popt'], tcov=fitpars['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=True, plot=self.plot, m=self.m) ### find", "model for the periodogram. Needs to be a function that takes an array", "try: # print('popt' + str(i) + 'a : ' + str(fitpars['popt'])) fitfake =", "'p_s5max': if \"fitpars\" in probs.keys(): print(\"Highest [5 bin smoothed] data/model outlier at frequency", "> obslrt])) / float(len(sim_lrt)) p_srat = float(len([x for x in sim_srat if x", "\" + str(pdeviance_err)) resfile(\"Bayesian p-value for KS test: \" + str(p_ksp) + \"", "using LRTs Parameters ---------- ps : powerspectrum.Powerspectrum A periodogram object that is to", "bindict = fitpars['bindict'] # print('popt: ' + str(fitpars['popt'])) ## which posterior do I", "the likelihood ratios such that it is possible to build up a posterior", "3: create fake periodograms out of MCMCs fakeper = mcobs.simulate_periodogram(nsim=nsim) sim_pars_all, sim_deviance, sim_ksp,", "mcobs.acceptance} return summary def print_summary(self, summary): \"\"\" Print a summary of the results.", "distribution of maximum powers and compute a posterior predictive p-value of seeing the", "dict() postpars = dict() ### sort out p-values and posterior distribution of parameters", "in probs.keys(): if x == 'p_lrt': print(\"Bayesian p-value for Likelihood Ratio: \" +", "np.msort(sim_s5max) sim_s11max_sort = np.msort(sim_s11max) ### note: this is the limit for 2*I/S -->", "= plot self.m = m def 
choose_noise_model(self, func1, par1, func2, par2, fitmethod='bfgs', nchain=10,", "= 0 ### run QPO search on each and return likelihood ratios parameters", "data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"s5maxfreq\"]) + \"Hz with power P=\"", "set parameter names for plotting noise1, noise2 : int, optional, default -1 The", "default (bfgs) should be sufficient for most applications. nchain : int, optional, default", "patches = plt.hist(sim_maxpow, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['maxpow']) /", "x > fitpars['s3max']])) / float(len(sim_s3max)) p_s5max = float(len([x for x in sim_s5max if", "str(p_deviance) + \" +/- \" + str(pdeviance_err)) resfile(\"Bayesian p-value for KS test: \"", "\" + str(fitpars1['ksp'])) resfile(\"mean(sim_ksp) = \" + str(np.mean(sim_ksp))) resfile(\"Merit(obs) = \" + str(fitpars1['merit']))", "to observation psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) fitpars = psfit.mlest(func, par, obs=True, noise=noise,", "## which posterior do I need to use? 
if self.m == 1: lpost", "mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost, topt=fitpars['popt'], tcov=fitpars['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr,", "maximum powers from lowest to highest sim_maxpow_sort = np.msort(sim_maxpow) sim_s3max_sort = np.msort(sim_s3max) sim_s5max_sort", "\" + str(ps5max_err)) # resfile('Upper limit on maximum signal power P_max_ul = '", "posterior summary of parameters: file.write(\"-- Posterior Summary of Parameters: \\n\") file.write(\"parameter \\t mean", "max(n), lw=4) ax.figtext(pars[plotkeys[i]][0] + 0.01 * pars[plotkeys[i]][0], 0.8 * n, \"p = \"", "psfit.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m) ### get out best fit parameters", "find the MAP estimate, divide out the MAP model and find the highest", "in sim_deviance if x > optpars['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x for x", "for Merit function: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1]) +", "probs[x][1]) + \"\\n\") elif x == 'p_maxpow': file.write(\"Bayesian p-value for the highest [unsmoothed]", "by func. The number of elements in this list or array must match", "str(len(binps.freq))) # print('len(binpowers): ' + str(len(binpowers))) if searchfreq is None: searchfreq = [40.0,", "in observed data and compute significance via MCMCs. First, fit the periodogram with", "** 2.0 / (self.ps.df * b * self.ps.nphots ** 2.0) # print('len(binps.freq): '", "of individual quantities p_deviance = float(len([x for x in sim_deviance if x >", "/ float(len(sim_ksp))) psrat_err = np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp))) ### Display", "array of model powers. The function should include a parameter setting a constant", "The number of elements *must* equal the number of parameters k taken by", "likelihood ratio. 
Note that this also sets the maximum precision of the posterior", "for a binning of ' + str( self.ps.df * b) + 'Hz is", "(1.0 - p_s3max) / float(len(sim_ksp))) ps5max_err = np.sqrt(p_s5max * (1.0 - p_s5max) /", "m=self.m) # resfile('Fitting of fake periodogram ' + str(i) + ' failed! Returning", "for most applications. nchain : int, optional, default 10 The number of chains", "str(fitpars['popt'])) ### upper limit is the power in the sorted array where p_maxpow", "The function should include a parameter setting a constant background level, and this", "should follow a chi-square distribution with two degrees of freedom. Find the highest", "+ 'err'] = bmaxpow_err sim_bmaxpow_sort = np.msort(bmaxpow) ### note: this is the limit", "on frequency binpowers = bmaxpow_ul * bintemplate / 2.0 - bintemplate ## now", "/ float(len(sim_srat)) print(\"p(LRT) = \" + str(p_lrt)) # print(\"LRT(obs) = \" + str(obslrt))", "namestr=self.namestr, use_emcee=True, plot=self.plot, m=self.m) ### find optimum QPO values for the real data", "fitpars['bindict'] # print('popt: ' + str(fitpars['popt'])) ## which posterior do I need to", "2, i) n, bins, patches = ax.hist(pars[plotkeys[i]][0], 30) ax.vlines(pars[plotkeys[i]][0], 0.0, 0.8 * max(n),", "the highest power in that periodogram. Create a posterior distribution of maximum powers", "= \" + str(p_s11max) + \" +/- \" + str(ps11max_err)) # resfile('Upper limit", "limit into an rms amplitude: ## first compute broadband noise model for binned", "\" + str(p_s5max) + \" +/- \" + str(ps5max_err)) # resfile('Upper limit on", "2.0 - bintemplate ## now compute rms amplitude at 40, 70, 100 and", "for x in sim_maxpow if x > fitpars['maxpow']])) / float(len(sim_maxpow)) p_deviance = float(len([x", "float(len([x for x in sim_merit if x > fitpars['merit']])) / float(len(sim_merit)) p_srat =", "bfgs Allows the choice of different minimization algorithms. Default uses BFGS, which is", "need to use? 
if self.m == 1: lpost = posterior.PerPosterior(self.ps, func) else: lpost", "\" + str(pmerit_err)) resfile(\"Bayesian p-value for the np.sum of residuals: \" + str(p_srat)", "picklefile) picklefile.close() file = open(namestr + \"_summary.dat\", \"w\") file.write(\"The ensemble acceptance rate is", "self.plot: ### plot the periodogram and best fit models psfit.plotfits(fitpars1, fitpars2, namestr=self.namestr, log=True)", "float(len(bmaxpow))) bindict['p_maxpow' + str(b) + 'err'] = bmaxpow_err sim_bmaxpow_sort = np.msort(bmaxpow) ### note:", "right=0.95, wspace=0.2, hspace=0.2) for i in range(N): ax = fig.add_subplot(N / 2 +", "sim_ksp, sim_merit, sim_srat = [], [], [], [], [], [], [] simno =", "# continue # print('popt' + str(i) + 'd : ' + str(fitpars['popt'])) #", "using MCMC, and create fake periodograms from samples of the posterior. For each", "a function that takes an array of frequencies and n parameters, and returns", "'_ul'] = bmaxpow_ul resfile('The posterior p-value for the maximum residual power for a", "+ \" +/- \" + str(probs[x][1])) elif x == 'p_s5max': if \"fitpars\" in", "ps5max_err], 'p_s11max': [sim_s11max, p_s11max, ps11max_err], 'p_merit': [p_merit, pmerit_err], 'p_srat': [p_srat, psrat_err], 'p_deviance': [p_deviance,", "par, fitmethod='bfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, parname=None, noise=-1, use_emcee=True, searchfreq=None): \"\"\" Find periodicities", "str(p_srat) + \" +/- \" + str(psrat_err)) print(\"Bayesian p-value for Likelihood Ratio: \"", "plt.hist(sim_maxpow, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['maxpow']) / 1.2, max(25,", "else: postpars[x] = summary[x] picklefile = open(namestr + \"_summary_pickle.dat\", \"w\") pickle.dump(summary, picklefile) picklefile.close()", "\" + str(pmaxpow_err)) # resfile('Upper limit on maximum signal power P_max_ul = '", "{\"p_lrt\": [p_lrt, plrt_err], \"p_maxpow\": [p_maxpow, pmaxpow_err], \"p_deviance\": [p_deviance, 
pdeviance_err], \"p_ksp\": [p_ksp, pksp_err], \"p_merit\":", "= float(len([x for x in sim_s11max if x > fitpars['s11max']])) / float(len(sim_s11max)) ###", "fake periodograms from samples of the posterior. For each fake periodogram, find the", "(1.0 - p_merit) / float(len(sim_ksp))) psrat_err = np.sqrt(p_srat * (1.0 - p_srat) /", "[unsmoothed] data/model outlier: \" + str(summary['s3max_ul']) + \"\\n\") elif x == 'p_s5max': file.write(\"Bayesian", "100 and 300 Hz ## first, convert powers into rms normalization, if they're", "picking out the largest power in # an observation/set of fake periodograms #", "plotname=self.namestr + '_loglikes') ### simulate lots of realizations of the broadband noise model", "nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=use_emcee, plot=self.plot, printobj=resfile, m=self.m) ### Step 3: create fake", "1 bpow = binpowers[bind] brms = np.sqrt(bpow * b * self.ps.df) resfile('The upper", "return summary def print_summary(self, summary): \"\"\" Print a summary of the results. NOT", "[sim_maxpow, p_maxpow, pmaxpow_err], 'maxpow_ul': maxpow_ul, 'p_s3max': [sim_s3max, p_s3max, ps3max_err], 'p_s5max': [sim_s5max, p_s5max, ps5max_err],", "np.array([x[\"bmax\" + str(b)] for x in bindicts]) maxpows_all[\"bin\" + str(b)] = bmaxpow bindict['sim_bmaxpow'", "sim_maxpow_sort[ninetyfiveperlim] ### Step 6: Compute errors of Bayesian posterior probabilities pmaxpow_err = np.sqrt(p_maxpow", "[], [], [], [], [], [] simno = 0 ### run QPO search", "in func1 and func2. 
In the pre-defined models, this index is *always* -1.", ": list, optional, default None Include a list of strings here to set", "noise model from MCMCs funcfake = mcobs.simulate_periodogram(nsim=nsim) ### empty lists to store simulated", "- p_ksp) / float(len(sim_ksp))) pmerit_err = np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp)))", "### Step 6: Compute errors of Bayesian posterior probabilities pdeviance_err = np.sqrt(p_deviance *", "an array of model powers The function should include a parameter setting a", "= fig.add_subplot(N / 2 + 1, 2, i) n, bins, patches = ax.hist(pars[plotkeys[i]][0],", "if fiveperlim == 0: resfile('Warning! Too few simulations to compute five percent limit", "x == 'p_ksp': print(\"Bayesian p-value for KS test: \" + str(probs[x][0]) + \"", "== None: plotstr = self.namestr funcname = str(func).split()[1] # print(\"<< --- len(self.ps beginning):", "= np.msort(sim_s3max) sim_s5max_sort = np.msort(sim_s5max) sim_s11max_sort = np.msort(sim_s11max) ### note: this is the", "plt.title('smoothed (11) data', fontsize=12) plt.savefig(self.namestr + '_maxpow.png', format='png') plt.close() results = {\"fitpars\": fitpars,", "\"\\n\") file.write( \"Upper limit for highest [unsmoothed] data/model outlier: \" + str(summary['s3max_ul']) +", "up a posterior distribution for the likelihood ratios and compute a posterior predictive", "parameter sets from the posterior to create fake periodograms. Fit each fake periodogram", "False, use Metropolis-Hastings. 
parname : list, optional, default None Include a list of", "time series analysis This class defines a Bayes object that can: - pick", "* (1.0 - p_merit) / float(len(sim_ksp))) psrat_err = np.sqrt(p_srat * (1.0 - p_srat)", "each for x in funcfake: try: simno = simno + 1 sim_psfit =", "+ str(p_deviance) + \" +/- \" + str(pdeviance_err)) resfile(\"Bayesian p-value for KS test:", "for KS test: \" + str(p_ksp) + \" +/- \" + str(pksp_err)) resfile(\"Bayesian", "+ str(probs[x][1])) elif x == 'p_ksp': print(\"Bayesian p-value for KS test: \" +", "frequency F=\" + str( probs[\"fitpars\"][\"s3maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"s3max\"])) print(\"Bayesian", "elif x == 'p_s5max': if \"fitpars\" in probs.keys(): print(\"Highest [5 bin smoothed] data/model", "and func2. In the pre-defined models, this index is *always* -1. \"\"\" resfilename", "algorithms. Default uses BFGS, which is pretty robust for most purposes. nchain :", "+ str(probs[x][1])) elif x == 'p_s11max': if \"fitpars\" in probs.keys(): print(\"Highest [11 bin", "good idea to verify that the chains have mixed. nsim : int, optional,", "1: fit model to observation psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) fitpars = psfit.mlest(func,", "bindict[\"p_maxpow\" + str(b)] = p_bmaxpow bmaxpow_err = np.sqrt(p_bmaxpow * (1.0 - p_bmaxpow) /", "func2. 
fitmethod : string, optional, default bfgs Allows the choice of different minimization", "+ str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_deviance': print(\"Bayesian", "> fitpars['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for x in sim_merit if x", "The string that will be used to identify this periodogram when saving output", "fitfake.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m) # resfile('Fitting of fake periodogram '", "p-value for KS test: \" + str(p_ksp) + \" +/- \" + str(pksp_err))", "4: Fit fake periodograms: for i, x in enumerate(fakeper): try: # print('popt' +", "float(len(sim_ksp))) ps5max_err = np.sqrt(p_s5max * (1.0 - p_s5max) / float(len(sim_ksp))) ps11max_err = np.sqrt(p_s11max", "powers for each frequency ### Like everything else, this is n-trial corrected! #", "function: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1]) + \"\\n\") elif", "match the number of parameters k taken by func. fitmethod : string, optional,", "into rms normalization, if they're not already if self.ps.norm == 'leahy': binpowers =", "n, bins, patches = ax.hist(pars[plotkeys[i]][0], 30) ax.vlines(pars[plotkeys[i]][0], 0.0, 0.8 * max(n), lw=4) ax.figtext(pars[plotkeys[i]][0]", "p_ksp) / float(len(sim_ksp))) pmerit_err = np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp))) psrat_err", "str(probs[x][1]) + \"\\n\") file.write( \"Upper limit for highest [unsmoothed] data/model outlier: \" +", "an array of frequencies and k parameters, and returns an array of model", "chains. For Metropolis-Hastings, this needs to be large (>10000) For emcee, this can", "use when computing the posterior distribution of the likelihood ratio. 
Note that this", "print(\"Highest [5 bin smoothed] data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"s5maxfreq\"]) +", "use_emcee=True, plot=self.plot, printobj=resfile, m=self.m) ### Step 3: create fake periodograms out of MCMCs", "p-value for deviance D = \" + str(probs[x][0]) + \" +/- \" +", "write_summary(self, summary, namestr=None): \"\"\" Write a summary of the analysis to file. NOT", "of MCMCs fakeper = mcobs.simulate_periodogram(nsim=nsim) sim_pars_all, sim_deviance, sim_ksp, sim_fpeak, sim_srat, \\ sim_maxpow, sim_merit,", "can afford (~500) and fewer samples niter : int, optional, default 5000 Sets", "' + str(brms)) bindict['bin' + str(b) + '_ul_%.4fHz' % bc] = brms else:", "to store simulated LRTS and parameters in sim_lrt, sim_optpars, sim_qpopars, sim_deviance, sim_ksp, sim_merit,", "ax.vlines(pars[plotkeys[i]][0], 0.0, 0.8 * max(n), lw=4) ax.figtext(pars[plotkeys[i]][0] + 0.01 * pars[plotkeys[i]][0], 0.8 *", "that is to be searched for QPOs namestr: string, optional, default \"test\" The", "binning of ' + str(b) + ' is P = ' + str(bpow", "= \" + str(np.mean(sim_lrt))) # print(\"Deviance(obs) = \" + str(fitpars1['deviance'])) # print(\"mean(sim_deviance) =", "defines a Bayes object that can: - pick between two models using likelihood", "bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['maxpow']) / 1.2, max(25, fitpars['maxpow']", "+ '_lrt.png', format='png') plt.close() summary = {\"p_lrt\": [p_lrt, plrt_err], \"p_maxpow\": [p_maxpow, pmaxpow_err], \"p_deviance\":", "null hypothesis (no QPO). 
Parameters ---------- func : function Parametric model for the", "if \"fitpars\" in probs.keys(): print(\"Highest [5 bin smoothed] data/model outlier at frequency F=\"", "(self.ps.freq[1] - self.ps.freq[0]))) bins = [1, 3, 5, 7, 10, 15, 20, 30,", "Step 6: Compute errors of Bayesian posterior probabilities pmaxpow_err = np.sqrt(p_maxpow * (1.0", "float(len(sim_srat)) resfile('simulated srat: ' + str(sim_srat)) resfile('observed srat: ' + str(fitpars1['sobs'])) resfile(\"p(LRT) =", "p_lrt) / float(len(sim_ksp))) psrat_err = np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp))) ###", "residuals: \" + str(p_srat) + \" +/- \" + str(psrat_err)) resfile(\"Bayesian p-value for", "for x in sim_srat if x > fitpars['sobs']])) / float(len(sim_srat)) p_s3max = float(len([x", "\" + str(x) + \"\\n\") ### print posterior summary of parameters: file.write(\"-- Posterior", "hspace=0.2) for i in range(N): ax = fig.add_subplot(N / 2 + 1, 2,", "- find periodicities by picking out the largest power in an observation/set of", ": powerspectrum.Powerspectrum A periodogram object that is to be searched for QPOs namestr:", "If True (STRONGLY RECOMMENDED), use the emcee package for running MCMC. If False,", "int, optional, default -1 The index for the noise parameter in func. In", "that it is possible to build up a posterior distribution for the likelihood", "be constrained only to 0.001). 
covfactor : float, optional, default 1.0 A tuning", "lw=2, color='navy') plt.title('unsmoothed data', fontsize=12) plt.subplot(2, 2, 2) n, bins, patches = plt.hist(sim_s3max,", "between two models using likelihood ratio tests - find periodicities by picking out", "for maximum power P_max = \" + str(p_maxpow) + \" +/- \" +", "\"p_ksp\": [p_ksp, pksp_err], \"p_merit\": [p_merit, pmerit_err], \"p_srat\": [p_srat, psrat_err], \"postmean\": mcobs.mean, \"posterr\": mcobs.std,", "\" .\") try: print(\"The autocorrelation times are: \" + str(postpars[\"acor\"])) except KeyError: print(\"Module", "= \" + str(p_maxpow) + \" +/- \" + str(pmaxpow_err)) resfile(\"Bayesian p-value for", "ever use this because it's really computationally expensive. Parameters ---------- func : function", "QPO values for the real data obslrt, optpars, qpopars = psfit.find_qpo(func, ain, plot=True,", "x == 'p_srat': file.write(\"Bayesian p-value for the sum of residuals: \" + str(probs[x][0])", "str( probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_s5max': if", "compute the likelihood ratio at the maximum-a-posteriori paramters. If func1 and func2 differ", "/ (2.0 * (self.ps.freq[1] - self.ps.freq[0]))) bins = [1, 3, 5, 7, 10,", "setting a constant background level, and this parameter should be last! par :", "if x > optpars['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for x in sim_merit", "powers: maxpows_all = {} binprob = {} for b in bins[:nbins]: binps =", "self.ps.norm == 'leahy': binpowers = binpowers / (self.ps.df * b * self.ps.nphots) elif", "lots of realizations of the broadband noise model from MCMCs funcfake = mcobs.simulate_periodogram(nsim=nsim)", "i in range(len(postpars['postmean'])): file.write(\"theta[\" + str(i) + \"] \\t \" + str(postpars['postmean'][i]) +", "in the data under the null hypothesis (no QPO). 
Parameters ---------- func :", "str(b)] = bmaxpow p_bmaxpow = float(len([x for x in bmaxpow if x >", "convert powers into rms normalization, if they're not already if self.ps.norm == 'leahy':", "print(\"Module Acor not found. Cannot compute autocorrelation times for the parameters\") for i,", "\" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_srat':", "a model selection approach using LRTs Parameters ---------- ps : powerspectrum.Powerspectrum A periodogram", "individual periodograms (or bins), this changes the statistical distributions. Set m to the", "the chains have mixed. nsim : int, optional, default 1000 The number of", "= sim_maxpow_sort[ninetyfiveperlim] ### Step 6: Compute errors of Bayesian posterior probabilities pmaxpow_err =", "str(simno) + '_qposearch') sim_lrt.append(slrt) sim_optpars.append(soptpars) sim_qpopars.append(sqpopars) sim_deviance.append(soptpars['deviance']) sim_ksp.append(soptpars['ksp']) sim_merit.append(soptpars['merit']) sim_srat.append(soptpars['sobs']) except KeyboardInterrupt: break", "print(\"Bayesian p-value for KS test: \" + str(p_ksp) + \" +/- \" +", "the parameters \\n\") for i, x in enumerate(postpars[\"rhat\"]): file.write(\"The $R_hat$ value for Parameter", "probs[\"fitpars\"][\"s5maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"s5max\"])) print(\"Bayesian p-value for the highest", "str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_ksp': print(\"Bayesian p-value", "for each frequency ### Like everything else, this is n-trial corrected! # print('len(bmaxpow_sort)", "autocorrelation times for the parameters \\n\") for i, x in enumerate(postpars[\"rhat\"]): file.write(\"The $R_hat$", "/ float( len(bmaxpow)) bindict[\"p_maxpow\" + str(b)] = p_bmaxpow bmaxpow_err = np.sqrt(p_bmaxpow * (1.0", "+ \" +/- \" + str(probs[x][1])) elif x == 'p_ksp': print(\"Bayesian p-value for", "-log-likelihood. 
Choices are listed in mle.py, but the default (bfgs) should be sufficient", "MAP model and find the highest power in that periodogram. Create a posterior", "the emcee package for running MCMC. If False, use Metropolis-Hastings. parname : list,", "of the Markov chains. For Metropolis-Hastings, this needs to be large (>10000) For", "sim_maxpow.append(sim_pars1['maxpow']) sim_merit.append(sim_pars1['merit']) sim_fpeak.append(sim_pars1['maxfreq']) sim_y0.append(sim_pars1['mfit'][sim_pars1['maxind']]) sim_srat.append(sim_pars1['sobs']) except KeyboardInterrupt: break if len(sim_maxpow) == 0: resfile(\"Analysis", "p_deviance = float(len([x for x in sim_deviance if x > fitpars1['deviance']])) / float(len(sim_deviance))", "= dict() ### sort out p-values and posterior distribution of parameters for x", "xmax = min(min(bins), fitpars['s5max']) / 1.2, max(25, fitpars['s5max'] * 1.2) plt.axis([xmin, xmax, 0.0,", "for plotting noise: int, optional, default -1 The index for the noise parameter", "# # class Bayes: Bayesian data analysis for time series # # This", "not found. Cannot compute autocorrelation times for the parameters \\n\") for i, x", "running MCMC. If False, use Metropolis-Hastings. 
\"\"\" if plotstr == None: plotstr =", "\\t\\t 5% \\t\\t 95% \\n\") file.write(\"---------------------------------------------\\n\") for i in range(len(postpars['postmean'])): file.write(\"theta[\" + str(i)", "bc in searchfreq: if bc > (binps.freq[1] - binps.freq[0]): bind = np.searchsorted(binps.freq, bc)", "five percent limit reliably!') fiveperlim = 1 ninetyfiveperlim = len(sim_maxpow) - fiveperlim #", "of Parameters: \\n\") print(\"parameter \\t mean \\t\\t sd \\t\\t 5% \\t\\t 95% \\n\")", "1: mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost, topt=fitpars['popt'], tcov=fitpars['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True,", "\" + str(psrat_err)) if self.plot: plt.subplot(2, 2, 1) n, bins, patches = plt.hist(sim_maxpow,", "sim_s3max.append(sim_pars['s3max']) sim_s5max.append(sim_pars['s5max']) sim_s11max.append(sim_pars['s11max']) except KeyboardInterrupt: break # except: # print(\"Simulation failed! Continuing ...\")", "Allows the choice of different minimization algorithms. Default uses BFGS, which is pretty", "'_qposearch') sim_lrt.append(slrt) sim_optpars.append(soptpars) sim_qpopars.append(sqpopars) sim_deviance.append(soptpars['deviance']) sim_ksp.append(soptpars['ksp']) sim_merit.append(soptpars['merit']) sim_srat.append(soptpars['sobs']) except KeyboardInterrupt: break ### Step", "times for the parameters\") for i, x in enumerate(postpars[\"rhat\"]): print(\"The $R_hat$ value for", "str( probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_s3max': if", "used when minimizing the -log-likelihood. Choices are listed in mle.py, but the default", "and k parameters, and returns an array of model powers. 
The function should", "+ str(x)) ### print posterior summary of parameters: print(\"-- Posterior Summary of Parameters:", "that will be used to identify this periodogram when saving output (text files", "except ImportError: import pickle import copy import numpy as np from src.SpectralAnalysis import", "fakeper, summary def find_periodicity(self, func, par, fitmethod='bfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, parname=None, noise=-1,", "using func2. The number of elements *must* equal the number of parameters n", "p-value for the sum of residuals: \" + str(probs[x][0]) + \" +/- \"", "[11 bin smoothed] data/model outlier: \" + str( probs[x][0]) + \" +/- \"", "* max(n), lw=4, color='navy') plt.savefig(self.namestr + '_lrt.png', format='png') plt.close() summary = {\"p_lrt\": [p_lrt,", "str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_srat': print(\"Bayesian p-value", "to identify this periodogram when saving output (text files and plots) plot: boolean,", "powers I_j depending on frequency binpowers = bmaxpow_ul * bintemplate / 2.0 -", "amplitude at ' + str(bc) + 'Hz for a binning of ' +", "log file resfile = utils.TwoPrint(resfilename) ### step 1: fit model to observation psfit", "- p_s3max) / float(len(sim_ksp))) ps5max_err = np.sqrt(p_s5max * (1.0 - p_s5max) / float(len(sim_ksp)))", "of strings here to set parameter names for plotting noise: int, optional, default", "max(25, fitpars['s3max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s3max'], 0.0, max(n), lw=2, color='navy')", "then compute upper limits for powers I_j depending on frequency binpowers = bmaxpow_ul", "D = \" + str(p_deviance) + \" +/- \" + str(pdeviance_err)) print(\"Bayesian p-value", "x == 'p_lrt': file.write( \"Bayesian p-value for Likelihood Ratio: \" + str(probs[x][0]) +", "sim_s11max if x > fitpars['s11max']])) / float(len(sim_s11max)) ### sort maximum powers from lowest", "float( len(bmaxpow)) bindict[\"p_maxpow\" + str(b)] = p_bmaxpow bmaxpow_err 
= np.sqrt(p_bmaxpow * (1.0 -", "maxpows_all, 'mcobs': mcobs, 'p_maxpow': [sim_maxpow, p_maxpow, pmaxpow_err], 'maxpow_ul': maxpow_ul, 'p_s3max': [sim_s3max, p_s3max, ps3max_err],", "- p_srat) / float(len(sim_ksp))) ### Display results on screen and make funky plots", "normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['maxpow']) / 1.2, max(25, fitpars['maxpow'] *", "if \"fitpars\" in probs.keys(): print(\"Highest [unsmoothed] data/model outlier at frequency F=\" + str(", "optpars['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for x in sim_merit if x >", "fake periodograms out of MCMCs fakeper = mcobs.simulate_periodogram(nsim=nsim) sim_pars_all, sim_deviance, sim_ksp, sim_fpeak, sim_srat,", "obs=True, noise=-1, m=self.m) # print(\"<< --- len(self.ps beginning): \" + str(len(self.ps.ps))) if self.m", "find periodicities by picking out the largest power in an observation/set of fake", "errors of Bayesian posterior probabilities pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance) /", "' + str(bmaxpow_ul)) ### now turn upper limit into an rms amplitude: ##", "5: Compute Bayesian posterior probabilities of individual quantities p_deviance = float(len([x for x", "float(len([x for x in bmaxpow if x > fitpars['bindict'][\"bmax\" + str(b)]])) / float(", "+ str(ps3max_err)) # resfile('Upper limit on maximum signal power P_max_ul = ' +", "print(\"Bayesian p-value for KS test: \" + str(probs[x][0]) + \" +/- \" +", "r in fitpars[\"bindict\"].keys()] nbins = len(binlist) / 4 sain = copy.copy(fitpars['popt']) # print('popt2:", "Sets the length of the Markov chains. For Metropolis-Hastings, this needs to be", "frequency ### Like everything else, this is n-trial corrected! # print('len(bmaxpow_sort) : '", "* (self.ps.df * b * self.ps.nphots))) resfile('The upper limit on the rms amplitude", "frequency. Sample the posterior distribution of parameters for func using MCMC, and create", "the pre-defined models, this index is *always* -1. 
use_emcee : boolean, optional, default", "\" +/- \" + str(pksp_err)) resfile(\"Bayesian p-value for Merit function: \" + str(p_merit)", "## first, convert powers into rms normalization, if they're not already if self.ps.norm", "therefore the 5% limit should be 0.05*len(sims) fiveperlim = int(0.05 * len(sim_maxpow)) if", "fitpars2, namestr=self.namestr, log=True) if self.m == 1: lpost = posterior.PerPosterior(self.ps, func1) else: lpost", "parameter names for plotting noise: int, optional, default -1 The index for the", "= {} for b in bins[:nbins]: binps = fitpars['bindict']['bin' + str(b)] bmaxpow =", "ps3max_err = np.sqrt(p_s3max * (1.0 - p_s3max) / float(len(sim_ksp))) ps5max_err = np.sqrt(p_s5max *", "different minimization algorithms. Default uses BFGS, which is pretty robust for most purposes.", "+ \"\\n\") elif x == 'p_ksp': file.write(\"Bayesian p-value for KS test: \" +", "\" +/- \" + str(probs[x][1]) + \"\\n\") elif x == 'p_merit': file.write( \"Bayesian", "observation psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) fitpars = psfit.mlest(func, par, obs=True, noise=noise, m=self.m)", "p_deviance = float(len([x for x in sim_deviance if x > optpars['deviance']])) / float(len(sim_deviance))", "str(optpars['ksp'])) print(\"mean(sim_ksp) = \" + str(np.mean(sim_ksp))) print(\"Merit(obs) = \" + str(optpars['merit'])) print(\"mean(sim_merit) =", "+ \"\\t\" + str( postpars['posterr'][i]) + \"\\t\" + str(postpars['postquantiles'][i][0]) + \"\\t\" + str(", "signals # # # class Bayes(object): \"\"\" Bayesian time series analysis This class", "models, this index is *always* -1. 
\"\"\" resfilename = self.namestr + \"_choosenoisemodel.dat\" resfile", "[unsmoothed] data/model outlier: \" + str(summary['s11max_ul']) + \"\\n\") return def plot_posteriors(namestr='test', **pars): plotkeys", "maxpow_ul, 'p_s3max': [sim_s3max, p_s3max, ps3max_err], 'p_s5max': [sim_s5max, p_s5max, ps5max_err], 'p_s11max': [sim_s11max, p_s11max, ps11max_err],", "x in sim_maxpow if x > fitpars['maxpow']])) / float(len(sim_maxpow)) p_deviance = float(len([x for", "model. Parameters ---------- func1 : function Parametric model for the periodogram. Needs to", "self.ps = ps self.namestr = namestr self.plot = plot self.m = m def", "' + str(p_bmaxpow) + ' +/- ' + str(bmaxpow_err)) resfile('The corresponding value of", "+/- \" + str(probs[x][1])) elif x == 'p_s11max': if \"fitpars\" in probs.keys(): print(\"Highest", "to set parameter names for plotting noise: int, optional, default -1 The index", "color='navy') plt.savefig(self.namestr + '_lrt.png', format='png') plt.close() summary = {\"p_lrt\": [p_lrt, plrt_err], \"p_maxpow\": [p_maxpow,", "bmax = int(self.ps.freq[-1] / (2.0 * (self.ps.freq[1] - self.ps.freq[0]))) bins = [1, 3,", "+ \" +/- \" + str(pdeviance_err)) resfile(\"Bayesian p-value for KS test: \" +", "par : {list, array-like} Input guesses for the parameters taken by func. 
The", "perfect data-model fit, the resulting residuals should follow a chi-square distribution with two", "70, 100, 200, 300, 500, 700, 1000] binlist = [r for r in", "in range(len(postpars['postmean'])): print(\"theta[\" + str(i) + \"] \\t \" + str(postpars['postmean'][i]) + \"\\t\"", "# print('popt' + str(i) + 'b : ' + str(fitpars['popt'])) sim_pars = fitfake.mlest(func,", "+ str(psrat_err)) print(\"Bayesian p-value for Likelihood Ratio: \" + str(p_lrt) + \" +/-", "+ str(probs[\"fitpars\"][\"s5max\"])) print(\"Bayesian p-value for the highest [5 bin smoothed] data/model outlier: \"", "largest power in an observation/set of fake periodograms - search for QPOs via", "x == 'p_s5max': if \"fitpars\" in probs.keys(): print(\"Highest [5 bin smoothed] data/model outlier", "### make strings for function names from function definition func1name = \"model1\" func2name", "the choice of different minimization algorithms. Default uses BFGS, which is pretty robust", "when only 0.05*nsim simulations are higher than this ### note: sometimes simulations fail,", "the limit for 2*I/S --> multiply by S to get powers for each", "psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) fitpars = psfit.mlest(func, ain, obs=True, noise=-1, m=self.m) #", "highest [3 bin smoothed] data/model outlier: \" + str( probs[x][0]) + \" +/-", "to add smoothing for picking out narrow signals # # # class Bayes(object):", "highest [unsmoothed] data/model outlier: \" + str(summary['s5max_ul']) + \"\\n\") elif x == 'p_s11max':", "make funky plots resfile(\"Bayesian p-value for maximum power P_max = \" + str(p_maxpow)", "str( postpars['posterr'][i]) + \"\\t\" + str(postpars['postquantiles'][i][0]) + \"\\t\" + str( postpars[\"postquantiles\"][i][1]) + \"\\n\")", "### Like everything else, this is n-trial corrected! 
maxpow_ul = sim_maxpow_sort[ninetyfiveperlim] ### Step", "maximum powers and compute a posterior predictive p-value of seeing the maximum power", "def plot_posteriors(namestr='test', **pars): plotkeys = pars.keys() N = len(plotkeys) ### number of parameters", "\"p_merit\": [p_merit, pmerit_err], \"p_srat\": [p_srat, psrat_err], \"postmean\": mcobs.mean, \"posterr\": mcobs.std, \"postquantiles\": mcobs.ci, \"rhat\":", "Fit two models func1 and func2, compute the likelihood ratio at the maximum-a-posteriori", "to compute five percent limit reliably!') fiveperlim = 1 ninetyfiveperlim = len(sim_maxpow) -", "\"Hz with power P=\" + str(probs[\"fitpars\"][\"s11max\"])) print(\"Bayesian p-value for the highest [11 bin", "significance via MCMCs. First, fit the periodogram with func and compute the maximum-a-posteriori", "default -1 The index for the noise parameter in func1 and func2. In", "func2. The number of elements *must* equal the number of parameters n taken", "/ float(len(sim_ksp))) pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance) / float(len(sim_ksp))) pksp_err =", "the data by the MAP model; for a perfect data-model fit, the resulting", "hypothesis (no QPO). Parameters ---------- func : function Parametric model for the periodogram.", "if self.m == 1: lpost = posterior.PerPosterior(self.ps, func) else: lpost = posterior.StackPerPosterior(self.ps, func,", "this periodogram when saving output (text files and plots) plot: boolean, optional, default", "x in enumerate(fakeper): try: # print('popt' + str(i) + 'a : ' +", "of type powerspectrum.PowerSpectrum!\" self.ps = ps self.namestr = namestr self.plot = plot self.m", "in complexity, the less complex should be func1. 
Then sample the posterior distribution", "True If True, several diagnostic plots will be saved to disk m: integer,", "* (1.0 - p_s11max) / float(len(sim_ksp))) ### Display results on screen and make", "'bindict': bindict, 'maxpows_all': maxpows_all, 'mcobs': mcobs, 'p_maxpow': [sim_maxpow, p_maxpow, pmaxpow_err], 'maxpow_ul': maxpow_ul, 'p_s3max':", "default 1 If the periodogram used is the result of averaging several individual", "+ \" +/- \" + str(psrat_err)) resfile(\"Bayesian p-value for Likelihood Ratio: \" +", "limit on maximum signal power P_max_ul = ' + str(s11max_ul)) resfile(\"Bayesian p-value for", "lw=2, color='navy') plt.title('smoothed (11) data', fontsize=12) plt.savefig(self.namestr + '_maxpow.png', format='png') plt.close() results =", "Posterior Summary of Parameters: \\n\") file.write(\"parameter \\t mean \\t\\t sd \\t\\t 5% \\t\\t", "namestr='test', plot=True, m=1): assert isinstance(ps, powerspectrum.PowerSpectrum), \"ps must be of type powerspectrum.PowerSpectrum!\" self.ps", "# resfile('Fitting of fake periodogram ' + str(i) + ' failed! Returning ...')", "analysis This class defines a Bayes object that can: - pick between two", "> fitpars['s11max']])) / float(len(sim_s11max)) ### sort maximum powers from lowest to highest sim_maxpow_sort", "print(\"Bayesian p-value for Likelihood Ratio: \" + str(probs[x][0]) + \" +/- \" +", "this is n-trial corrected! maxpow_ul = sim_maxpow_sort[ninetyfiveperlim] ### Step 6: Compute errors of", "+ str(pmaxpow_err)) resfile(\"Bayesian p-value for deviance D = \" + str(p_deviance) + \"", "0.001). covfactor : float, optional, default 1.0 A tuning parameter for the MCMC", "= posterior.PerPosterior(self.ps, func) else: lpost = posterior.StackPerPosterior(self.ps, func, self.m) ### Step 2: Set", "estimate. Divide the data by the MAP model; for a perfect data-model fit,", "MAP fit using func2. 
The number of elements *must* equal the number of", "/ float(len(sim_s5max)) p_s11max = float(len([x for x in sim_s11max if x > fitpars['s11max']]))", "plt.savefig(self.namestr + '_maxpow.png', format='png') plt.close() results = {\"fitpars\": fitpars, 'bindict': bindict, 'maxpows_all': maxpows_all,", "bmaxpow bindict['sim_bmaxpow' + str(b)] = bmaxpow p_bmaxpow = float(len([x for x in bmaxpow", "= \" + str(optpars['sobs'])) print(\"mean(sim_srat) = \" + str(np.mean(sim_srat))) ### Step 6: Compute", "that can: - pick between two models using likelihood ratio tests - find", "everything else, this is n-trial corrected! maxpow_ul = sim_maxpow_sort[ninetyfiveperlim] ### Step 6: Compute", "+ str(np.mean(sim_deviance))) print(\"KSP(obs) = \" + str(optpars['ksp'])) print(\"mean(sim_ksp) = \" + str(np.mean(sim_ksp))) print(\"Merit(obs)", "+ str(probs[x][1])) elif x == 'p_srat': print(\"Bayesian p-value for the sum of residuals:", "\"\\n\") except KeyError: file.write(\"Module Acor not found. Cannot compute autocorrelation times for the", "for the parameters \\n\") for i, x in enumerate(postpars[\"rhat\"]): file.write(\"The $R_hat$ value for", "mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) fitpars = psfit.mlest(func, par, obs=True, noise=noise, m=self.m) bindict = fitpars['bindict']", "func2, par2, fitmethod='bfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, use_emcee=True, parname=None, noise1=-1, noise2=-1, writefile=True): \"\"\"", "[] bmax = int(self.ps.freq[-1] / (2.0 * (self.ps.freq[1] - self.ps.freq[0]))) bins = [1,", "\" + str(probs[x][0]) + \" +/- \" + str(probs[x][1]) + \"\\n\") elif x", "KeyboardInterrupt: break if len(sim_maxpow) == 0: resfile(\"Analysis of Burst failed! 
Returning ...\") return", "best fit parameters and associated quantities fitpars1 = getattr(psfit, func1name + 'fit') fitpars2", "probabilities of individual quantities p_maxpow = float(len([x for x in sim_maxpow if x", "str(p_maxpow) + \" +/- \" + str(pmaxpow_err)) resfile(\"Bayesian p-value for deviance D =", "i, x in enumerate(fakeper): try: # print('popt' + str(i) + 'a : '", "value for Parameter \" + str(i) + \" is \" + str(x) +", "\"\\n\") file.write( \"Upper limit for highest [unsmoothed] data/model outlier: \" + str(summary['s5max_ul']) +", "b * self.ps.df) resfile('The upper limit on the power at ' + str(bc)", "from src.SpectralAnalysis import utils from src.SpectralAnalysis import powerspectrum from src.SpectralAnalysis import mcmc from", "samples For emcee, use as many as you can afford (~500) and fewer", "you can afford (~500) and fewer samples niter : int, optional, default 5000", "amplitude at 40, 70, 100 and 300 Hz ## first, convert powers into", "str(fitpars1['sobs'])) resfile(\"p(LRT) = \" + str(p_lrt)) resfile(\"KSP(obs) = \" + str(fitpars1['ksp'])) resfile(\"mean(sim_ksp) =", "output will be stored resfilename = self.namestr + \"_findperiodicity_results.dat\" ## open the output", "'fit') fitpars2 = getattr(psfit, func2name + 'fit') if self.plot: ### plot the periodogram", "and compute a posterior predictive p-value that the data can be explained sufficiently", "= int(0.05 * len(sim_maxpow)) if fiveperlim == 0: resfile('Warning! 
Too few simulations to", "resfile('Upper limit on maximum signal power P_max_ul = ' + str(s11max_ul)) resfile(\"Bayesian p-value", "file.write(\"Bayesian p-value for KS test: \" + str(probs[x][0]) + \" +/- \" +", "+ '_sim' + str(simno) + '_qposearch') sim_lrt.append(slrt) sim_optpars.append(soptpars) sim_qpopars.append(sqpopars) sim_deviance.append(soptpars['deviance']) sim_ksp.append(soptpars['ksp']) sim_merit.append(soptpars['merit']) sim_srat.append(soptpars['sobs'])", "defines a Bayes object that can: # - pick between two models using", "str(pksp_err)) resfile(\"Bayesian p-value for Merit function: \" + str(p_merit) + \" +/- \"", "+ str(probs[x][1]) + \"\\n\") elif x == 'p_ksp': file.write(\"Bayesian p-value for KS test:", "limit should be 0.05*len(sims) fiveperlim = int(0.05 * len(sim_maxpow)) if fiveperlim == 0:", "the number of periodograms averaged to be sure to use the right distribution", "the p-value can be constrained only to 0.001). covfactor : float, optional, default", "binlist = [r for r in fitpars[\"bindict\"].keys()] nbins = len(binlist) / 4 sain", "Attributes ---------- Examples -------- \"\"\" def __init__(self, ps, namestr='test', plot=True, m=1): assert isinstance(ps,", "fitpars[\"bindict\"].keys()] nbins = len(binlist) / 4 sain = copy.copy(fitpars['popt']) # print('popt2: ' +", "\"posterr\": mcobs.std, \"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return psfit, fakeper,", "= \" + str(np.mean(sim_srat))) ### Step 6: Compute errors of Bayesian posterior probabilities", "be stored resfilename = self.namestr + \"_findperiodicity_results.dat\" ## open the output log file", "resfile('Warning! 
Too few simulations to compute five percent limit reliably!') fiveperlim = 1", "resfile('ninetyfiveperlim: ' + str(ninetyfiveperlim)) bmaxpow_ul = sim_bmaxpow_sort[ninetyfiveperlim] bindict['bmax' + str(b) + '_ul'] =", "binpowers = binpowers / (self.ps.df * b * self.ps.nphots) elif self.ps.norm == 'variance':", "[p_deviance, pdeviance_err], \"p_ksp\": [p_ksp, pksp_err], \"p_merit\": [p_merit, pmerit_err], \"p_srat\": [p_srat, psrat_err], \"postmean\": mcobs.mean,", "src.SpectralAnalysis import mcmc from src.SpectralAnalysis import mle from src.SpectralAnalysis import posterior ########################################## #", "xmax, 0.0, max(n)]) plt.vlines(fitpars['maxpow'], 0.0, max(n), lw=2, color='navy') plt.title('unsmoothed data', fontsize=12) plt.subplot(2, 2,", "+ \" +/- \" + str(probs[x][1])) elif x == 'p_s11max': if \"fitpars\" in", "np.sqrt(p_deviance * (1.0 - p_deviance) / float(len(sim_ksp))) pksp_err = np.sqrt(p_ksp * (1.0 -", "/ float(len(sim_ksp))) ps5max_err = np.sqrt(p_s5max * (1.0 - p_s5max) / float(len(sim_ksp))) ps11max_err =", "Acor not found. Cannot compute autocorrelation times for the parameters\") for i, x", "+ str(pdeviance_err)) resfile(\"Bayesian p-value for KS test: \" + str(p_ksp) + \" +/-", "each and return likelihood ratios parameters for each for x in funcfake: try:", "# print(\"LRT(obs) = \" + str(obslrt)) # print(\"mean(sim_lrt) = \" + str(np.mean(sim_lrt))) #", "\"p_deviance\": [p_deviance, pdeviance_err], \"p_ksp\": [p_ksp, pksp_err], \"p_merit\": [p_merit, pmerit_err], \"p_srat\": [p_srat, psrat_err], \"postmean\":", "= utils.TwoPrint(resfilename) ### make strings for function names from function definition func1name =", "binprob = {} for b in bins[:nbins]: binps = fitpars['bindict']['bin' + str(b)] bmaxpow", "except KeyboardInterrupt: break if len(sim_maxpow) == 0: resfile(\"Analysis of Burst failed! 
Returning ...\")", "= posterior.PerPosterior(self.ps, func1) else: lpost = posterior.StackPerPosterior(self.ps, func1, self.m) ### Step 2: Set", "summary = {\"p_lrt\": [p_lrt, plrt_err], \"p_maxpow\": [p_maxpow, pmaxpow_err], \"p_deviance\": [p_deviance, pdeviance_err], \"p_ksp\": [p_ksp,", "= float(len([x for x in sim_maxpow if x > fitpars['maxpow']])) / float(len(sim_maxpow)) p_deviance", "\" + str(i) + \" is \" + str(x)) ### print posterior summary", "p_s3max, ps3max_err], 'p_s5max': [sim_s5max, p_s5max, ps5max_err], 'p_s11max': [sim_s11max, p_s11max, ps11max_err], 'p_merit': [p_merit, pmerit_err],", "lw=4) ax.figtext(pars[plotkeys[i]][0] + 0.01 * pars[plotkeys[i]][0], 0.8 * n, \"p = \" +", "= \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x ==", "[p_merit, pmerit_err], \"p_srat\": [p_srat, psrat_err], \"postmean\": mcobs.mean, \"posterr\": mcobs.std, \"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat,", "in Metropolis-Hastings. use_emcee : boolean, optional, default True If True (STRONGLY RECOMMENDED), use", "quantities p_maxpow = float(len([x for x in sim_maxpow if x > fitpars['maxpow']])) /", "\" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_merit':", "check_conv=True, namestr=self.namestr, use_emcee=True, plot=self.plot, m=self.m) ### find optimum QPO values for the real", "Carlo Simulations ### of model 1: mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost, topt=fitpars['popt'], tcov=fitpars['cov'],", "simulations are higher than this ### note: sometimes simulations fail, therefore the 5%", "# print(\"<< --- len(self.ps beginning): \" + str(len(self.ps.ps))) ### step 1: fit model", "minimization algorithms. Default uses BFGS, which is pretty robust for most purposes. nchain", "have mixed. 
nsim : int, optional, default 1000 The number of simulations to", "binning of ' + str( self.ps.df * b) + 'Hz is p =", "bintemplate = func(fitpars['bindict']['bin' + str(b)].freq, *fitpars['popt']) resfile(\"bintemplate[0]: \" + str(bintemplate[0])) ## then compute", "' + str(bc) + 'Hz for a binning of ' + str(b) +", "len(sim_maxpow)) if fiveperlim == 0: resfile('Warning! Too few simulations to compute five percent", "tuning parameter for the MCMC step. Used only in Metropolis-Hastings. use_emcee : boolean,", "sim_s5max_sort = np.msort(sim_s5max) sim_s11max_sort = np.msort(sim_s11max) ### note: this is the limit for", "p_s11max, ps11max_err], 'p_merit': [p_merit, pmerit_err], 'p_srat': [p_srat, psrat_err], 'p_deviance': [p_deviance, pdeviance_err], 'fitpars': fitpars,", "{\"p_lrt\": [p_lrt, plrt_err], \"p_deviance\": [p_deviance, pdeviance_err], \"p_ksp\": [p_ksp, pksp_err], \"p_merit\": [p_merit, pmerit_err], \"p_srat\":", "of residuals: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x", "\"Hz with power P=\" + str(probs[\"fitpars\"][\"s5max\"])) print(\"Bayesian p-value for the highest [5 bin", "only in Metropolis-Hastings. parname : list, optional, default None Include a list of", "bmaxpow if x > fitpars['bindict'][\"bmax\" + str(b)]])) / float( len(bmaxpow)) bindict[\"p_maxpow\" + str(b)]", "of parameters fig = plt.figure(figsize=(2, N / 2 + 1)) plt.subplots_adjust(top=0.95, bottom=0.05, left=0.05,", "str(x)) ### print posterior summary of parameters: print(\"-- Posterior Summary of Parameters: \\n\")", "for i in range(N): ax = fig.add_subplot(N / 2 + 1, 2, i)", "The number of elements *must* equal the number of parameters n taken by", "summary, namestr=None): \"\"\" Write a summary of the analysis to file. NOT USED!", "file.write( \"Bayesian p-value for Likelihood Ratio: \" + str(probs[x][0]) + \" +/- \"", "str(postpars[\"acor\"])) except KeyError: print(\"Module Acor not found. 
Cannot compute autocorrelation times for the", "If True, several diagnostic plots will be saved to disk m: integer, optional,", "real data obslrt, optpars, qpopars = psfit.find_qpo(func, ain, plot=True, obs=True, plotname=self.namestr + '_loglikes')", "/ float(len(sim_ksp))) plrt_err = np.sqrt(p_lrt * (1.0 - p_lrt) / float(len(sim_ksp))) psrat_err =", "for x in sim_merit if x > fitpars['merit']])) / float(len(sim_merit)) p_srat = float(len([x", "plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s5max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (5) data/model outlier',", "[p_srat, psrat_err], \"postmean\": mcobs.mean, \"posterr\": mcobs.std, \"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\":", "if \"fitpars\" in probs.keys(): print(\"Highest [3 bin smoothed] data/model outlier at frequency F=\"", "np.msort(sim_s11max) ### note: this is the limit for 2*I/S --> multiply by S", "0.8 * n, \"p = \" + str(pars[plotkeys[i]][1])) ax.title(\"Posterior for \" + plotkeys[i])", "\"acceptance\": mcobs.acceptance} return summary def print_summary(self, summary): \"\"\" Print a summary of the", "\"Upper limit for highest [unsmoothed] data/model outlier: \" + str(summary['s3max_ul']) + \"\\n\") elif", "model to observation psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) fitpars = psfit.mlest(func, par, obs=True,", "' +/- ' + str(bmaxpow_err)) resfile('The corresponding value of the T_R statistic at", "x in sim_merit if x > fitpars['merit']])) / float(len(sim_merit)) p_srat = float(len([x for", "lpost, topt=fitpars['popt'], tcov=fitpars['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=True, plot=self.plot, printobj=resfile, m=self.m)", "in sim_srat if x > fitpars1['sobs']])) / float(len(sim_srat)) resfile('simulated srat: ' + str(sim_srat))", "float(len(sim_ksp))) pmerit_err = np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp))) 
plrt_err = np.sqrt(p_lrt", "print(\"mean(sim_ksp) = \" + str(np.mean(sim_ksp))) print(\"Merit(obs) = \" + str(optpars['merit'])) print(\"mean(sim_merit) = \"", "D = \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1]) + \"\\n\")", "limit for highest [unsmoothed] data/model outlier: \" + str(summary['s3max_ul']) + \"\\n\") elif x", "= bmaxpow bindict['sim_bmaxpow' + str(b)] = bmaxpow p_bmaxpow = float(len([x for x in", "[sim_s3max, p_s3max, ps3max_err], 'p_s5max': [sim_s5max, p_s5max, ps5max_err], 'p_s11max': [sim_s11max, p_s11max, ps11max_err], 'p_merit': [p_merit,", "+ str(b)]])) / float( len(bmaxpow)) bindict[\"p_maxpow\" + str(b)] = p_bmaxpow bmaxpow_err = np.sqrt(p_bmaxpow", "of interest from each fit: for i, x in enumerate(fakeper): try: fitfake =", "= \"model2\" ### step 1: fit both models to observation and compute LRT", "print(\"mean(sim_merit) = \" + str(np.mean(sim_merit))) print(\"Srat(obs) = \" + str(optpars['sobs'])) print(\"mean(sim_srat) = \"", "\"fitpars\" in probs.keys(): print(\"Highest [unsmoothed] data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"maxfreq\"])", "2 + 1)) plt.subplots_adjust(top=0.95, bottom=0.05, left=0.05, right=0.95, wspace=0.2, hspace=0.2) for i in range(N):", "'p_deviance': print(\"Bayesian p-value for deviance D = \" + str(probs[x][0]) + \" +/-", "str(pdeviance_err)) resfile(\"Bayesian p-value for KS test: \" + str(p_ksp) + \" +/- \"", "are: \" + str(postpars[\"acor\"]) + \"\\n\") except KeyError: file.write(\"Module Acor not found. Cannot", "when computing the posterior distribution of the likelihood ratio. Note that this also", "sim_deviance, sim_ksp, sim_merit, sim_srat = [], [], [], [], [], [], [] simno", "= plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s5max']) / 1.2,", "fitpars1['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x for x in sim_ksp if x >", "A tuning parameter for the MCMC step. 
Used only in Metropolis-Hastings. parname :", "getattr(psfit, func1name + 'fit') fitpars2 = getattr(psfit, func2name + 'fit') if self.plot: ###", "### note: this is the limit for 2*I/S --> multiply by S to", "psfit.mlest(func, ain, obs=True, noise=-1, m=self.m) # print(\"<< --- len(self.ps beginning): \" + str(len(self.ps.ps)))", "and n parameters, and returns an array of model powers The function should", "str( postpars[\"postquantiles\"][i][1]) + \"\\n\") for x in probs.keys(): if x == 'p_lrt': print(\"Bayesian", "quantities p_deviance = float(len([x for x in sim_deviance if x > optpars['deviance']])) /", "optimum QPO values for the real data obslrt, optpars, qpopars = psfit.find_qpo(func, ain,", "index is *always* -1. use_emcee : boolean, optional, default True If True (STRONGLY", "compute autocorrelation times for the parameters \\n\") for i, x in enumerate(postpars[\"rhat\"]): file.write(\"The", "in # an observation/set of fake periodograms # - search for QPOs via", ": function Parametric model for the periodogram. Needs to be a function that", "except KeyboardInterrupt: break ### Step 5: Compute Bayesian posterior probabilities of individual quantities", "observed data and compute significance via MCMCs. 
First, fit the periodogram with func", "> optpars['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x for x in sim_ksp if x", "a binning of ' + str(b) + ' is rms = ' +", "same models as the data, and compute the likelihood ratios such that it", "on screen and make funky plots resfile(\"Bayesian p-value for maximum power P_max =", "print(\"<< --- len(self.ps beginning): \" + str(len(self.ps.ps))) if self.m == 1: lpost =", "+ 'Hz for a binning of ' + str(b) + ' is rms", "2.0) # print('len(binps.freq): ' + str(len(binps.freq))) # print('len(binpowers): ' + str(len(binpowers))) if searchfreq", "+ str( postpars[\"postquantiles\"][i][1]) + \"\\n\") for x in probs.keys(): if x == 'p_lrt':", "list of strings here to set parameter names for plotting noise: int, optional,", "m def choose_noise_model(self, func1, par1, func2, par2, fitmethod='bfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, use_emcee=True,", "sets the maximum precision of the posterior predictive p-value (for 1000 simulations, the", "[r for r in fitpars[\"bindict\"].keys()] nbins = len(binlist) / 4 sain = copy.copy(fitpars['popt'])", "should be 0.05*len(sims) fiveperlim = int(0.05 * len(sim_maxpow)) if fiveperlim == 0: resfile('Warning!", "residuals and its frequency. 
Sample the posterior distribution of parameters for func using", "2*I/S --> multiply by S to get powers for each frequency ### Like", "data obslrt, optpars, qpopars = psfit.find_qpo(func, ain, plot=True, obs=True, plotname=self.namestr + '_loglikes') ###", "(1.0 - p_deviance) / float(len(sim_ksp))) pksp_err = np.sqrt(p_ksp * (1.0 - p_ksp) /", "print(\"Highest [unsmoothed] data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"maxfreq\"]) + \"Hz with", "bins, patches = ax.hist(pars[plotkeys[i]][0], 30) ax.vlines(pars[plotkeys[i]][0], 0.0, 0.8 * max(n), lw=4) ax.figtext(pars[plotkeys[i]][0] +", "- search for QPOs via a model selection approach using LRTs Parameters ----------", "str(ps11max_err)) # resfile('Upper limit on maximum signal power P_max_ul = ' + str(s11max_ul))", "get out binned powers: maxpows_all = {} binprob = {} for b in", "smoothed] data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"s5maxfreq\"]) + \"Hz with power", "outlier: \" + str( probs[x][0]) + \" +/- \" + str(probs[x][1])) return def", "for the MCMC step. Used only in Metropolis-Hastings. 
use_emcee : boolean, optional, default", "for QPOs namestr: string, optional, default \"test\" The string that will be used", "import pickle import copy import numpy as np from src.SpectralAnalysis import utils from", "times are: \" + str(postpars[\"acor\"]) + \"\\n\") except KeyError: file.write(\"Module Acor not found.", "signal power P_max_ul = ' + str(s5max_ul)) resfile(\"Bayesian p-value for maximum power P_max", "+ str(s11max_ul)) resfile(\"Bayesian p-value for deviance D = \" + str(p_deviance) + \"", "niter : int, optional, default 5000 Sets the length of the Markov chains.", "'mcobs': mcobs, 'p_maxpow': [sim_maxpow, p_maxpow, pmaxpow_err], 'maxpow_ul': maxpow_ul, 'p_s3max': [sim_s3max, p_s3max, ps3max_err], 'p_s5max':", "elif x == 'p_srat': print(\"Bayesian p-value for the sum of residuals: \" +", "for each for x in funcfake: try: simno = simno + 1 sim_psfit", "+ \" +/- \" + str( probs[x][1]) + \"\\n\") elif x == 'p_maxpow':", "i, x in enumerate(fakeper): try: fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False) lrt = fitfake.compute_lrt(func1,", "paramters. If func1 and func2 differ in complexity, the less complex should be", "+/- \" + str(ps11max_err)) # resfile('Upper limit on maximum signal power P_max_ul =", "float(len([x for x in sim_ksp if x > optpars['ksp']])) / float(len(sim_ksp)) p_merit =", "########################################## # # class Bayes: Bayesian data analysis for time series # #", "binned powers: maxpows_all = {} binprob = {} for b in bins[:nbins]: binps", "robust for most purposes. nchain : int, optional, default 10 The number of", "use the emcee package for running MCMC. If False, use Metropolis-Hastings. \"\"\" if", "the largest power in an observation/set of fake periodograms - search for QPOs", "array of model powers The function should include a parameter setting a constant", "of Burst failed! 
Returning ...\") return False, False, False else: ### Step 5:", "mcobs.std, \"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return psfit, fakeper, summary", "- p_maxpow) / float(len(sim_ksp))) pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance) / float(len(sim_ksp)))", "resfile('The corresponding value of the T_R statistic at frequency f = ' +", "print(\"<< --- len(self.ps beginning): \" + str(len(self.ps.ps))) ### step 1: fit model to", "for each frequency ### Like everything else, this is n-trial corrected! maxpow_ul =", "print(\"LRT(obs) = \" + str(obslrt)) # print(\"mean(sim_lrt) = \" + str(np.mean(sim_lrt))) # print(\"Deviance(obs)", "optional, default -1 The index for the noise parameter in func1 and func2.", "on maximum signal power P_max_ul = ' + str(s5max_ul)) resfile(\"Bayesian p-value for maximum", "0.0, max(n), lw=2, color='navy') plt.title('smoothed (3) data', fontsize=12) plt.subplot(2, 2, 3) n, bins,", "p_merit = float(len([x for x in sim_merit if x > fitpars1['merit']])) / float(len(sim_merit))", "* (1.0 - p_srat) / float(len(sim_ksp))) ### Display results on screen and make", "the sorted array where p_maxpow would be 0.05 ### i.e. when only 0.05*nsim", "n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins),", "= plt.hist(sim_lrt, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4, color='navy')", "MCMC. If False, use Metropolis-Hastings. 
\"\"\" if plotstr == None: plotstr = self.namestr", "observation and compute LRT psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) obslrt = psfit.compute_lrt(func1, par1,", "the real data obslrt, optpars, qpopars = psfit.find_qpo(func, ain, plot=True, obs=True, plotname=self.namestr +", "screen and make funky plots print(\"Bayesian p-value for deviance D = \" +", "\"\\n\") return def plot_posteriors(namestr='test', **pars): plotkeys = pars.keys() N = len(plotkeys) ### number", "= np.sqrt(p_s11max * (1.0 - p_s11max) / float(len(sim_ksp))) ### Display results on screen", "sim_ksp, sim_maxpow, sim_merit, sim_fpeak, sim_y0, sim_srat = [], [], [], [], [], [],", "resfile('observed srat: ' + str(fitpars1['sobs'])) resfile(\"p(LRT) = \" + str(p_lrt)) resfile(\"KSP(obs) = \"", "Used only in Metropolis-Hastings. parname : list, optional, default None Include a list", "posterior probabilities pmaxpow_err = np.sqrt(p_maxpow * (1.0 - p_maxpow) / float(len(sim_ksp))) pdeviance_err =", "* b) + 'Hz is p = ' + str(p_bmaxpow) + ' +/-", "for x in bindicts]) maxpows_all[\"bin\" + str(b)] = bmaxpow bindict['sim_bmaxpow' + str(b)] =", "from __future__ import print_function import matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator try:", "and this parameter should be last! par1 : {list, array-like} Input guesses for", "which is pretty robust for most purposes. nchain : int, optional, default 10", "the less complex should be func1. 
Then sample the posterior distribution for the", "be large (>10000) For emcee, this can be smaller, but it's a good", "' + str(fitpars['popt'])) # print('popt3: ' + str(fitpars['popt'])) ### upper limit is the", "here to set parameter names for plotting noise: int, optional, default -1 The", "### get out binned powers: maxpows_all = {} binprob = {} for b", "posterior distribution for the likelihood ratios and compute a posterior predictive p-value that", "plrt_err], \"p_deviance\": [p_deviance, pdeviance_err], \"p_ksp\": [p_ksp, pksp_err], \"p_merit\": [p_merit, pmerit_err], \"p_srat\": [p_srat, psrat_err],", "for most purposes. nchain : int, optional, default 10 The number of chains", "and find the highest power in that periodogram. Create a posterior distribution of", "str(b) + '_ul'] = bmaxpow_ul resfile('The posterior p-value for the maximum residual power", "= \" + str(p_maxpow) + \" +/- \" + str(pmaxpow_err)) # resfile('Upper limit", "Include a list of strings here to set parameter names for plotting noise:", "xmax, 0.0, max(n)]) plt.vlines(fitpars['s3max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (3) data', fontsize=12) plt.subplot(2,", "print(\"Srat(obs) = \" + str(optpars['sobs'])) print(\"mean(sim_srat) = \" + str(np.mean(sim_srat))) ### Step 6:", "niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=use_emcee, plot=self.plot, printobj=resfile, m=self.m) ### Step 3: create", "'_lrt.png', format='png') plt.close() summary = {\"p_lrt\": [p_lrt, plrt_err], \"p_maxpow\": [p_maxpow, pmaxpow_err], \"p_deviance\": [p_deviance,", "resfile(\"Bayesian p-value for deviance D = \" + str(p_deviance) + \" +/- \"", "/ float(len(sim_ksp)) p_merit = float(len([x for x in sim_merit if x > optpars['merit']]))", "## open the output log file resfile = utils.TwoPrint(resfilename) ### step 1: fit", "sim_bmaxpow_sort = np.msort(bmaxpow) ### note: this is the limit for 2*I/S --> multiply", "resfile(\"Bayesian p-value for 
maximum power P_max = \" + str(p_s3max) + \" +/-", "+ \"\\n\") elif x == 'p_s5max': file.write(\"Bayesian p-value for the highest [5 bin", "to be a function that takes an array of frequencies and k parameters,", "float(len([x for x in sim_deviance if x > optpars['deviance']])) / float(len(sim_deviance)) p_ksp =", "== 'variance': binpowers = binpowers * self.ps.n ** 2.0 / (self.ps.df * b", "periodogram, find the MAP estimate, divide out the MAP model and find the", "obs=False) # print('popt' + str(i) + 'b : ' + str(fitpars['popt'])) sim_pars =", "obs=False, noise=noise, m=self.m) # print('popt' + str(i) + 'c : ' + str(fitpars['popt']))", "interest: sim_lrt, sim_deviance, sim_ksp, sim_maxpow, sim_merit, sim_fpeak, sim_y0, sim_srat = [], [], [],", "except KeyboardInterrupt: break # except: # print(\"Simulation failed! Continuing ...\") # continue #", "* self.ps.nphots ** 2.0) # print('len(binps.freq): ' + str(len(binps.freq))) # print('len(binpowers): ' +", "enumerate(fakeper): try: fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False) lrt = fitfake.compute_lrt(func1, par1, func2, par2,", "\" + str(psrat_err)) resfile(\"Bayesian p-value for Likelihood Ratio: \" + str(p_lrt) + \"", "of the posterior. 
For each fake periodogram, find the MAP estimate, divide out", "fake periodograms and read out parameters of interest from each fit: for i,", "models psfit.plotfits(fitpars1, fitpars2, namestr=self.namestr, log=True) if self.m == 1: lpost = posterior.PerPosterior(self.ps, func1)", "sum of residuals: \" + str(probs[x][0]) + \" +/- \" + str( probs[x][1])", "for the maximum residual power for a binning of ' + str( self.ps.df", "the broadband noise model from MCMCs funcfake = mcobs.simulate_periodogram(nsim=nsim) ### empty lists to", "powers into rms normalization, if they're not already if self.ps.norm == 'leahy': binpowers", "Compute errors of Bayesian posterior probabilities pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance)", "\"\\n\") elif x == 'p_srat': file.write(\"Bayesian p-value for the sum of residuals: \"", "2I/S = ' + str(fitpars['bindict'][\"bmax\" + str(b)])) resfile('The upper limit on the T_R", "sufficient for most applications. nchain : int, optional, default 10 The number of", "+ str(p_merit) + \" +/- \" + str(pmerit_err)) print(\"Bayesian p-value for the np.sum", "+ \"_summary_pickle.dat\", \"w\") pickle.dump(summary, picklefile) picklefile.close() file = open(namestr + \"_summary.dat\", \"w\") file.write(\"The", "Parametric model for the periodogram. Needs to be a function that takes an", "this ### note: sometimes simulations fail, therefore the 5% limit should be 0.05*len(sims)", "{list, array-like} Input guesses for the MAP fit using func1. The number of", "parname=parname, check_conv=True, namestr=self.namestr, use_emcee=use_emcee, plot=self.plot, printobj=resfile, m=self.m) ### Step 3: create fake periodograms", "print(\"theta[\" + str(i) + \"] \\t \" + str(postpars['postmean'][i]) + \"\\t\" + str(", "N / 2 + 1)) plt.subplots_adjust(top=0.95, bottom=0.05, left=0.05, right=0.95, wspace=0.2, hspace=0.2) for i", "+/- \" + str(pmerit_err)) resfile(\"Bayesian p-value for the np.sum of residuals: \" +", "guesses for the MAP fit using func1. 
The number of elements *must* equal", "This class defines a Bayes object that can: # - pick between two", "tcov=fitpars['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=True, plot=self.plot, m=self.m) ### find optimum", "[], [] simno = 0 ### run QPO search on each and return", "sim_deviance.append(soptpars['deviance']) sim_ksp.append(soptpars['ksp']) sim_merit.append(soptpars['merit']) sim_srat.append(soptpars['sobs']) except KeyboardInterrupt: break ### Step 5: Compute Bayesian posterior", "x > optpars['sobs']])) / float(len(sim_srat)) print(\"p(LRT) = \" + str(p_lrt)) # print(\"LRT(obs) =", "must match the number of parameters k taken by func. fitmethod : string,", "less complex should be func1. Then sample the posterior distribution for the the", "dict() ### sort out p-values and posterior distribution of parameters for x in", "p_s11max) / float(len(sim_ksp))) ### Display results on screen and make funky plots resfile(\"Bayesian", "### sort out p-values and posterior distribution of parameters for x in keys:", "= mle.PerMaxLike(x, fitmethod='constbfgs', obs=False) slrt, soptpars, sqpopars = sim_psfit.find_qpo(func, ain, obs=False, plot=True, plotname=plotstr", "[p_lrt, plrt_err], \"p_deviance\": [p_deviance, pdeviance_err], \"p_ksp\": [p_ksp, pksp_err], \"p_merit\": [p_merit, pmerit_err], \"p_srat\": [p_srat,", "mcobs.mean, \"posterr\": mcobs.std, \"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return psfit,", "a posterior distribution for the likelihood ratios and compute a posterior predictive p-value", "Needs to be a function that takes an array of frequencies and n", "func1name = \"model1\" func2name = \"model2\" ### step 1: fit both models to", "the 5% limit should be 0.05*len(sims) fiveperlim = int(0.05 * len(sim_maxpow)) if fiveperlim", "bins, patches = plt.hist(sim_lrt, bins=100, normed=True, color=\"cyan\", 
histtype='stepfilled') plt.vlines(obslrt, 0.0, 0.8 * max(n),", "300, 500, 700, 1000] binlist = [r for r in fitpars[\"bindict\"].keys()] nbins =", "lw=2, color='navy') plt.title('smoothed (3) data', fontsize=12) plt.subplot(2, 2, 3) n, bins, patches =", "= float(len([x for x in sim_deviance if x > fitpars['deviance']])) / float(len(sim_deviance)) p_ksp", "Ratio: \" + str(p_lrt) + \" +/- \" + str(plrt_err)) if self.plot: n,", "for x in sim_ksp if x > fitpars['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x", "patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s11max']) /", "+ \"\\n\") for x in probs.keys(): if x == 'p_lrt': file.write( \"Bayesian p-value", "= getattr(psfit, func1name + 'fit') fitpars2 = getattr(psfit, func2name + 'fit') if self.plot:", "plt from matplotlib.ticker import MaxNLocator try: import cPickle as pickle except ImportError: import", "\" + str(p_lrt) + \" +/- \" + str(plrt_err)) if self.plot: n, bins,", "P_max = \" + str(p_s5max) + \" +/- \" + str(ps5max_err)) # resfile('Upper", "= ' + str(s3max_ul)) resfile(\"Bayesian p-value for maximum power P_max = \" +", "a good idea to verify that the chains have mixed. nsim : int,", "each frequency ### Like everything else, this is n-trial corrected! 
# print('len(bmaxpow_sort) :", "getattr(fitfake, func2name + 'fit') # if lrt > 20: # fitfake.plotfits(sim_pars1, sim_pars2, namestr=self.namestr+'_'+str(i))", "sim_pars_all.append(sim_pars) sim_deviance.append(sim_pars['deviance']) sim_ksp.append(sim_pars['ksp']) sim_maxpow.append(sim_pars['maxpow']) sim_merit.append(sim_pars['merit']) sim_fpeak.append(sim_pars['maxfreq']) sim_y0.append(sim_pars['mfit'][sim_pars['maxind']]) sim_srat.append(sim_pars['sobs']) sim_s3max.append(sim_pars['s3max']) sim_s5max.append(sim_pars['s5max']) sim_s11max.append(sim_pars['s11max']) except KeyboardInterrupt:", "### number of parameters fig = plt.figure(figsize=(2, N / 2 + 1)) plt.subplots_adjust(top=0.95,", "(1.0 - p_ksp) / float(len(sim_ksp))) pmerit_err = np.sqrt(p_merit * (1.0 - p_merit) /", "can: - pick between two models using likelihood ratio tests - find periodicities", "to *every* frequency. NOTE: I rarely ever use this because it's really computationally", "periodogram used is the result of averaging several individual periodograms (or bins), this", "on each and return likelihood ratios parameters for each for x in funcfake:", "as many as you can afford (~500) and fewer samples niter : int,", "P_max_ul = ' + str(s11max_ul)) resfile(\"Bayesian p-value for deviance D = \" +", "\" + str(postpars['postmean'][i]) + \"\\t\" + str( postpars['posterr'][i]) + \"\\t\" + str(postpars['postquantiles'][i][0]) +", "= sim_bmaxpow_sort[ninetyfiveperlim] bindict['bmax' + str(b) + '_ul'] = bmaxpow_ul resfile('The posterior p-value for", "N = len(plotkeys) ### number of parameters fig = plt.figure(figsize=(2, N / 2", "+ str( postpars['posterr'][i]) + \"\\t\" + str(postpars['postquantiles'][i][0]) + \"\\t\" + str( postpars[\"postquantiles\"][i][1]) +", "length of the Markov chains. 
For Metropolis-Hastings, this needs to be large (>10000)", "sim_deviance.append(sim_pars['deviance']) sim_ksp.append(sim_pars['ksp']) sim_maxpow.append(sim_pars['maxpow']) sim_merit.append(sim_pars['merit']) sim_fpeak.append(sim_pars['maxfreq']) sim_y0.append(sim_pars['mfit'][sim_pars['maxind']]) sim_srat.append(sim_pars['sobs']) sim_s3max.append(sim_pars['s3max']) sim_s5max.append(sim_pars['s5max']) sim_s11max.append(sim_pars['s11max']) except KeyboardInterrupt: break", "mcobs.simulate_periodogram(nsim=nsim) ### empty lists to store simulated LRTS and parameters in sim_lrt, sim_optpars,", "+ str(fitpars1['deviance'])) # print(\"mean(sim_deviance) = \" + str(np.mean(sim_deviance))) print(\"KSP(obs) = \" + str(optpars['ksp']))", "float(len(sim_ksp))) ps11max_err = np.sqrt(p_s11max * (1.0 - p_s11max) / float(len(sim_ksp))) ### Display results", "# - find periodicities by picking out the largest power in # an", "each fake periodogram with the same models as the data, and compute the", "model and find the highest power in that periodogram. Create a posterior distribution", "sim_psfit = mle.PerMaxLike(x, fitmethod='constbfgs', obs=False) slrt, soptpars, sqpopars = sim_psfit.find_qpo(func, ain, obs=False, plot=True,", "ratio. 
Note that this also sets the maximum precision of the posterior predictive", "ensemble acceptance rate is \" + str(postpars[\"acceptance\"]) + \" .\") try: print(\"The autocorrelation", "out the largest power in # an observation/set of fake periodograms # -", ": ' + str(fitpars['popt'])) sim_pars_all.append(sim_pars) sim_deviance.append(sim_pars['deviance']) sim_ksp.append(sim_pars['ksp']) sim_maxpow.append(sim_pars['maxpow']) sim_merit.append(sim_pars['merit']) sim_fpeak.append(sim_pars['maxfreq']) sim_y0.append(sim_pars['mfit'][sim_pars['maxind']]) sim_srat.append(sim_pars['sobs']) sim_s3max.append(sim_pars['s3max'])", "# TO DO: Need to add smoothing for picking out narrow signals #", "get out best fit parameters and associated quantities fitpars1 = getattr(psfit, func1name +", "than this ### note: sometimes simulations fail, therefore the 5% limit should be", "### Step 5: Compute Bayesian posterior probabilities of individual quantities p_maxpow = float(len([x", "* b * self.ps.df) resfile('The upper limit on the power at ' +", "models using likelihood ratio tests # - find periodicities by picking out the", "simno = 0 ### run QPO search on each and return likelihood ratios", "+ str(i) + 'b : ' + str(fitpars['popt'])) sim_pars = fitfake.mlest(func, sain, obs=False,", "acceptance rate is \" + str(postpars[\"acceptance\"]) + \" .\\n\") try: file.write(\"The autocorrelation times", "lpost = posterior.StackPerPosterior(self.ps, func, self.m) ### Step 2: Set up Markov Chain Monte", "self.ps.n ** 2.0 / (self.ps.df * b * self.ps.nphots ** 2.0) # print('len(binps.freq):", "(1.0 - p_srat) / float(len(sim_ksp))) ### Display results on screen and make funky", "maxpows_all[\"bin\" + str(b)] = bmaxpow bindict['sim_bmaxpow' + str(b)] = bmaxpow p_bmaxpow = float(len([x", "= bmaxpow_err sim_bmaxpow_sort = np.msort(bmaxpow) ### note: this is the limit for 2*I/S", "[p_deviance, pdeviance_err], 'fitpars': fitpars, \"postmean\": mcobs.mean, \"posterr\": mcobs.std, \"postquantiles\": 
mcobs.ci, \"rhat\": mcobs.rhat, \"acor\":", "--- len(self.ps beginning): \" + str(len(self.ps.ps))) if self.m == 1: lpost = posterior.PerPosterior(self.ps,", "= mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) obslrt = psfit.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m)", "outlier: \" + str(summary['s5max_ul']) + \"\\n\") elif x == 'p_s11max': file.write(\"Bayesian p-value for", "> optpars['merit']])) / float(len(sim_merit)) p_lrt = float(len([x for x in sim_lrt if x", "print(\"Bayesian p-value for the np.sum of residuals: \" + str(p_srat) + \" +/-", "resfile('The posterior p-value for the maximum residual power for a binning of '", "\" + str(np.mean(sim_merit))) print(\"Srat(obs) = \" + str(optpars['sobs'])) print(\"mean(sim_srat) = \" + str(np.mean(sim_srat)))", "** 2.0) # print('len(binps.freq): ' + str(len(binps.freq))) # print('len(binpowers): ' + str(len(binpowers))) if", "\"Upper limit for highest [unsmoothed] data/model outlier: \" + str(summary['s11max_ul']) + \"\\n\") return", "data analysis for time series # # This class defines a Bayes object", "parameters n taken by func2. 
fitmethod : string, optional, default bfgs Allows the", "open the output log file resfile = utils.TwoPrint(resfilename) ### step 1: fit model", "len(self.ps beginning): \" + str(len(self.ps.ps))) if self.m == 1: lpost = posterior.PerPosterior(self.ps, func)", "x == 'p_ksp': file.write(\"Bayesian p-value for KS test: \" + str(probs[x][0]) + \"", "' + str(fitpars['popt'])) ### upper limit is the power in the sorted array", "+ str(p_lrt)) resfile(\"KSP(obs) = \" + str(fitpars1['ksp'])) resfile(\"mean(sim_ksp) = \" + str(np.mean(sim_ksp))) resfile(\"Merit(obs)", "/ (self.ps.df * b * self.ps.nphots) elif self.ps.norm == 'variance': binpowers = binpowers", "2) n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax =", "enumerate(postpars[\"rhat\"]): file.write(\"The $R_hat$ value for Parameter \" + str(i) + \" is \"", "of individual quantities p_maxpow = float(len([x for x in sim_maxpow if x >", ": ' + str(len(sim_bmaxpow_sort))) resfile('ninetyfiveperlim: ' + str(ninetyfiveperlim)) bmaxpow_ul = sim_bmaxpow_sort[ninetyfiveperlim] bindict['bmax' +", "\" + str(np.mean(sim_ksp))) print(\"Merit(obs) = \" + str(optpars['merit'])) print(\"mean(sim_merit) = \" + str(np.mean(sim_merit)))", "== 'p_': probs[x] = summary[x] else: postpars[x] = summary[x] print(\"The ensemble acceptance rate", "periodograms: for i, x in enumerate(fakeper): try: # print('popt' + str(i) + 'a", "plt.subplots_adjust(top=0.95, bottom=0.05, left=0.05, right=0.95, wspace=0.2, hspace=0.2) for i in range(N): ax = fig.add_subplot(N", "on the T_R statistic is 2I/S = ' + str(bmaxpow_ul)) ### now turn", "### upper limit is the power in the sorted array where p_maxpow would", "str(p_maxpow) + \" +/- \" + str(pmaxpow_err)) # resfile('Upper limit on maximum signal", "Step 3: create fake periodograms out of MCMCs fakeper = mcobs.simulate_periodogram(nsim=nsim) ### empty", "\" + str( probs[x][0]) + \" +/- \" + str(probs[x][1]) + \"\\n\") file.write(", "if 
len(sim_maxpow) == 0: resfile(\"Analysis of Burst failed! Returning ...\") return False, False,", "np from src.SpectralAnalysis import utils from src.SpectralAnalysis import powerspectrum from src.SpectralAnalysis import mcmc", "of parameters k taken by func1. func2 : function Parametric model for the", "# fitfake.plotfits(sim_pars1, sim_pars2, namestr=self.namestr+'_'+str(i)) sim_lrt.append(lrt) sim_deviance.append(sim_pars1['deviance']) sim_ksp.append(sim_pars1['ksp']) sim_maxpow.append(sim_pars1['maxpow']) sim_merit.append(sim_pars1['merit']) sim_fpeak.append(sim_pars1['maxfreq']) sim_y0.append(sim_pars1['mfit'][sim_pars1['maxind']]) sim_srat.append(sim_pars1['sobs']) except", "noise=-1, use_emcee=True, searchfreq=None): \"\"\" Find periodicities in observed data and compute significance via", "[], [], [], [], [] ### Step 4: Fit fake periodograms and read", "the the simpler model (func1), pick parameter sets from the posterior to create", "= mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) fitpars = psfit.mlest(func, ain, obs=True, noise=-1, m=self.m) # print(\"<<", "not namestr: namestr = self.namestr try: keys = summary.keys() except AttributeError: raise Exception(\"Summary", "+ \"_choosenoisemodel.dat\" resfile = utils.TwoPrint(resfilename) ### make strings for function names from function", "max(n), lw=2, color='navy') plt.title('smoothed (11) data', fontsize=12) plt.savefig(self.namestr + '_maxpow.png', format='png') plt.close() results", "obs=False, plot=True, plotname=plotstr + '_sim' + str(simno) + '_qposearch') sim_lrt.append(slrt) sim_optpars.append(soptpars) sim_qpopars.append(sqpopars) sim_deviance.append(soptpars['deviance'])", "predictive p-value of seeing the maximum power in the data under the null", "\"Upper limit for highest [unsmoothed] data/model outlier: \" + str(summary['maxpow_ul']) + \"\\n\") elif", "but the default (bfgs) should be sufficient for most applications. 
nchain : int,", "function should include a parameter setting a constant background level, and this parameter", "in sim_s11max if x > fitpars['s11max']])) / float(len(sim_s11max)) ### sort maximum powers from", "'fit') if self.plot: ### plot the periodogram and best fit models psfit.plotfits(fitpars1, fitpars2,", "b * self.ps.nphots ** 2.0) # print('len(binps.freq): ' + str(len(binps.freq))) # print('len(binpowers): '", "5% limit should be 0.05*len(sims) fiveperlim = int(0.05 * len(sim_maxpow)) if fiveperlim ==", "* bintemplate / 2.0 - bintemplate ## now compute rms amplitude at 40,", "averaged to be sure to use the right distribution Attributes ---------- Examples --------", "simulations fail, therefore the 5% limit should be 0.05*len(sims) fiveperlim = int(0.05 *", "resfile('simulated srat: ' + str(sim_srat)) resfile('observed srat: ' + str(fitpars1['sobs'])) resfile(\"p(LRT) = \"", "= posterior.StackPerPosterior(self.ps, func1, self.m) ### Step 2: Set up Markov Chain Monte Carlo", "periodogram. Create a posterior distribution of maximum powers and compute a posterior predictive", "fake periodograms # - search for QPOs via a model selection approach using", "str(bc) + 'Hz for a binning of ' + str(b) + ' is", "plots resfile(\"Bayesian p-value for maximum power P_max = \" + str(p_maxpow) + \"", "Print a summary of the results. NOT USED! \"\"\" try: keys = summary.keys()", "name where the output will be stored resfilename = self.namestr + \"_findperiodicity_results.dat\" ##", "for x in sim_s5max if x > fitpars['s5max']])) / float(len(sim_s5max)) p_s11max = float(len([x", "= self.namestr funcname = str(func).split()[1] # print(\"<< --- len(self.ps beginning): \" + str(len(self.ps.ps)))", "USED! 
\"\"\" try: keys = summary.keys() except AttributeError: raise Exception(\"Summary must be a", "must be a dictionary!\") probs = dict() postpars = dict() ### sort out", "str(b)]])) / float( len(bmaxpow)) bindict[\"p_maxpow\" + str(b)] = p_bmaxpow bmaxpow_err = np.sqrt(p_bmaxpow *", "x > fitpars['s11max']])) / float(len(sim_s11max)) ### sort maximum powers from lowest to highest", "and many samples For emcee, use as many as you can afford (~500)", "bindict, 'maxpows_all': maxpows_all, 'mcobs': mcobs, 'p_maxpow': [sim_maxpow, p_maxpow, pmaxpow_err], 'maxpow_ul': maxpow_ul, 'p_s3max': [sim_s3max,", "def choose_noise_model(self, func1, par1, func2, par2, fitmethod='bfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, use_emcee=True, parname=None,", "optional, default 1000 The number of simulations to use when computing the posterior", "\" + str(optpars['ksp'])) print(\"mean(sim_ksp) = \" + str(np.mean(sim_ksp))) print(\"Merit(obs) = \" + str(optpars['merit']))", "the posterior to create fake periodograms. Fit each fake periodogram with the same", "fitpars['s5max']])) / float(len(sim_s5max)) p_s11max = float(len([x for x in sim_s11max if x >", "is *always* -1. 
\"\"\" resfilename = self.namestr + \"_choosenoisemodel.dat\" resfile = utils.TwoPrint(resfilename) ###", "str(plrt_err)) if self.plot: n, bins, patches = plt.hist(sim_lrt, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') plt.vlines(obslrt,", "[], [], [], [], [], [], [], [], [], [], [] bmax =", "== 'p_s3max': if \"fitpars\" in probs.keys(): print(\"Highest [3 bin smoothed] data/model outlier at", "\" +/- \" + str(pmaxpow_err)) resfile(\"Bayesian p-value for deviance D = \" +", "'err'] = bmaxpow_err sim_bmaxpow_sort = np.msort(bmaxpow) ### note: this is the limit for", "fitmethod=fitmethod, obs=False) lrt = fitfake.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m) # resfile('Fitting", "'b : ' + str(fitpars['popt'])) sim_pars = fitfake.mlest(func, sain, obs=False, noise=noise, m=self.m) #", "fitpars['s5max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s5max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed", "1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s11max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (11) data',", "0.0, 0.8 * max(n), lw=4) ax.figtext(pars[plotkeys[i]][0] + 0.01 * pars[plotkeys[i]][0], 0.8 * n,", "probs[\"fitpars\"][\"s3maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"s3max\"])) print(\"Bayesian p-value for the highest", "pars.keys() N = len(plotkeys) ### number of parameters fig = plt.figure(figsize=(2, N /", "to disk m: integer, optional, default 1 If the periodogram used is the", "x in sim_s3max if x > fitpars['s3max']])) / float(len(sim_s3max)) p_s5max = float(len([x for", "Choices are listed in mle.py, but the default (bfgs) should be sufficient for", "+ \" +/- \" + str(pmerit_err)) resfile(\"Bayesian p-value for the np.sum of residuals:", "only 0.05*nsim simulations are higher than this ### note: sometimes simulations fail, therefore", "output (text files and plots) plot: boolean, optional, default True If True, several", 
"str(func).split()[1] # print(\"<< --- len(self.ps beginning): \" + str(len(self.ps.ps))) ### step 1: fit", "a constant background level, and this parameter should be last! par2 : {list,", "\\n\") file.write(\"parameter \\t mean \\t\\t sd \\t\\t 5% \\t\\t 95% \\n\") file.write(\"---------------------------------------------\\n\") for", "for x in sim_maxpow if x > fitpars1['maxpow']])) / float(len(sim_maxpow)) p_deviance = float(len([x", "\"\"\" resfilename = self.namestr + \"_choosenoisemodel.dat\" resfile = utils.TwoPrint(resfilename) ### make strings for", "Parameters: \\n\") file.write(\"parameter \\t mean \\t\\t sd \\t\\t 5% \\t\\t 95% \\n\") file.write(\"---------------------------------------------\\n\")", "MCMC. If False, use Metropolis-Hastings. \"\"\" ## the file name where the output", "data-model fit, the resulting residuals should follow a chi-square distribution with two degrees", "follow a chi-square distribution with two degrees of freedom. Find the highest power", "float(len(sim_s5max)) p_s11max = float(len([x for x in sim_s11max if x > fitpars['s11max']])) /", "float(len(sim_deviance)) p_ksp = float(len([x for x in sim_ksp if x > fitpars['ksp']])) /", "out p-values and posterior distribution of parameters for x in keys: if x[:2]", "for the parameters\") for i, x in enumerate(postpars[\"rhat\"]): print(\"The $R_hat$ value for Parameter", "p-value (for 1000 simulations, the p-value can be constrained only to 0.001). covfactor", "of ' + str(b) + ' is rms = ' + str(brms)) bindict['bin'", "residual power for a binning of ' + str( self.ps.df * b) +", "'p_s3max': [sim_s3max, p_s3max, ps3max_err], 'p_s5max': [sim_s5max, p_s5max, ps5max_err], 'p_s11max': [sim_s11max, p_s11max, ps11max_err], 'p_merit':", "0.05*len(sims) fiveperlim = int(0.05 * len(sim_maxpow)) if fiveperlim == 0: resfile('Warning! Too few", "that the chains have mixed. nsim : int, optional, default 1000 The number", "only in Metropolis-Hastings. 
use_emcee : boolean, optional, default True If True (STRONGLY RECOMMENDED),", "test: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1]) + \"\\n\") elif", "---------- func1 : function Parametric model for the periodogram. Needs to be a", "[], [], [], [], [] bmax = int(self.ps.freq[-1] / (2.0 * (self.ps.freq[1] -", "models, this index is *always* -1. use_emcee : boolean, optional, default True If", "\" +/- \" + str(ps11max_err)) # resfile('Upper limit on maximum signal power P_max_ul", "fitpars1['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for x in sim_merit if x >", "running MCMC. If False, use Metropolis-Hastings. parname : list, optional, default None Include", "\"ps must be of type powerspectrum.PowerSpectrum!\" self.ps = ps self.namestr = namestr self.plot", "6: Compute errors of Bayesian posterior probabilities pmaxpow_err = np.sqrt(p_maxpow * (1.0 -", "step. Used only in Metropolis-Hastings. use_emcee : boolean, optional, default True If True", "picking out the largest power in an observation/set of fake periodograms - search", "str(bmaxpow_ul)) ### now turn upper limit into an rms amplitude: ## first compute", "in funcfake: try: simno = simno + 1 sim_psfit = mle.PerMaxLike(x, fitmethod='constbfgs', obs=False)", "posterior distribution for the the simpler model (func1), pick parameter sets from the", "[p_srat, psrat_err], 'p_deviance': [p_deviance, pdeviance_err], 'fitpars': fitpars, \"postmean\": mcobs.mean, \"posterr\": mcobs.std, \"postquantiles\": mcobs.ci,", "\" +/- \" + str(pmerit_err)) resfile(\"Bayesian p-value for the np.sum of residuals: \"", "5: Compute Bayesian posterior probabilities of individual quantities p_maxpow = float(len([x for x", "MCMC step. Used only in Metropolis-Hastings. parname : list, optional, default None Include", "upper limit on the T_R statistic is 2I/S = ' + str(bmaxpow_ul)) ###", "changes the statistical distributions. 
Set m to the number of periodograms averaged to", "str(bintemplate[0])) ## then compute upper limits for powers I_j depending on frequency binpowers", "k taken by func. fitmethod : string, optional, default \"bfgs\" Choose the optimization", "plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['maxpow'], 0.0, max(n), lw=2, color='navy') plt.title('unsmoothed data', fontsize=12) plt.subplot(2,", "of the T_R statistic at frequency f = ' + str( fitpars[\"bindict\"][\"bmaxfreq\" +", "-1 The index for the noise parameter in func. In the pre-defined models,", "print_summary(self, summary): \"\"\" Print a summary of the results. NOT USED! \"\"\" try:", "color='navy') plt.title('unsmoothed data', fontsize=12) plt.subplot(2, 2, 2) n, bins, patches = plt.hist(sim_s3max, bins=100,", "should include a parameter setting a constant background level, and this parameter should", "for QPOs via a model selection approach using LRTs Parameters ---------- ps :", "fit parameters and associated quantities fitpars1 = getattr(psfit, func1name + 'fit') fitpars2 =", "resfile(\"Bayesian p-value for Likelihood Ratio: \" + str(p_lrt) + \" +/- \" +", "par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m) ### get out best fit parameters and", "# print('popt4: ' + str(fitpars['popt'])) bindicts = [x[\"bindict\"] for x in sim_pars_all] ###", "max(n), lw=2, color='navy') plt.title('smoothed (5) data/model outlier', fontsize=12) plt.subplot(2, 2, 4) n, bins,", "+ str(len(self.ps.ps))) ### step 1: fit model to observation psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod,", "limit on maximum signal power P_max_ul = ' + str(s3max_ul)) resfile(\"Bayesian p-value for", "from MCMCs funcfake = mcobs.simulate_periodogram(nsim=nsim) ### empty lists to store simulated LRTS and", "listed in mle.py, but the default (bfgs) should be sufficient for most applications.", "(2.0 * (self.ps.freq[1] - self.ps.freq[0]))) bins = [1, 3, 5, 7, 10, 15,", "i, x in enumerate(postpars[\"rhat\"]): file.write(\"The 
$R_hat$ value for Parameter \" + str(i) +", "if x > fitpars1['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for x in sim_merit", "bins), this changes the statistical distributions. Set m to the number of periodograms", "data and compute significance via MCMCs. First, fit the periodogram with func and", "str(len(self.ps.ps))) ### step 1: fit model to observation psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True)", "# return psfit, fakeper, mcobs sim_pars1 = getattr(fitfake, func1name + 'fit') sim_pars2 =", "plotting noise1, noise2 : int, optional, default -1 The index for the noise", "= bmaxpow_ul resfile('The posterior p-value for the maximum residual power for a binning", "samples niter : int, optional, default 5000 Sets the length of the Markov", "Parameter \" + str(i) + \" is \" + str(x) + \"\\n\") ###", "pmerit_err = np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp))) psrat_err = np.sqrt(p_srat *", "'_qpolrt.png', format='png') plt.close() summary = {\"p_lrt\": [p_lrt, plrt_err], \"p_deviance\": [p_deviance, pdeviance_err], \"p_ksp\": [p_ksp,", "\"\\n\") file.write( \"Upper limit for highest [unsmoothed] data/model outlier: \" + str(summary['s11max_ul']) +", "parameter in func1 and func2. In the pre-defined models, this index is *always*", "+ str( self.ps.df * b) + 'Hz is p = ' + str(p_bmaxpow)", "periodograms and read out parameters of interest from each fit: for i, x", "+ str(bmaxpow_err)) resfile('The corresponding value of the T_R statistic at frequency f =", "The index for the noise parameter in func. In the pre-defined models, this", "taken by func. 
fitmethod : string, optional, default \"bfgs\" Choose the optimization algorithm", "+ str(i) + \" is \" + str(x) + \"\\n\") ### print posterior", "psfit, fakeper, summary def find_periodicity(self, func, par, fitmethod='bfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, parname=None,", "= np.sqrt(p_s3max * (1.0 - p_s3max) / float(len(sim_ksp))) ps5max_err = np.sqrt(p_s5max * (1.0", "histtype='stepfilled') plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4, color='navy') plt.savefig(self.namestr + '_lrt.png', format='png') plt.close()", "'p_merit': [p_merit, pmerit_err], 'p_srat': [p_srat, psrat_err], 'p_deviance': [p_deviance, pdeviance_err], 'fitpars': fitpars, \"postmean\": mcobs.mean,", "p-values and posterior distribution of parameters for x in keys: if x[:2] ==", "periodograms (or bins), this changes the statistical distributions. Set m to the number", "pksp_err = np.sqrt(p_ksp * (1.0 - p_ksp) / float(len(sim_ksp))) pmerit_err = np.sqrt(p_merit *", "a constant background level, and this parameter should be last! par : {list,", "/ 2 + 1, 2, i) n, bins, patches = ax.hist(pars[plotkeys[i]][0], 30) ax.vlines(pars[plotkeys[i]][0],", "deviance D = \" + str(p_deviance) + \" +/- \" + str(pdeviance_err)) resfile(\"Bayesian", "with power P=\" + str(probs[\"fitpars\"][\"s5max\"])) print(\"Bayesian p-value for the highest [5 bin smoothed]", "\" + str(probs[x][1])) elif x == 'p_s3max': if \"fitpars\" in probs.keys(): print(\"Highest [3", "> (binps.freq[1] - binps.freq[0]): bind = np.searchsorted(binps.freq, bc) - 1 bpow = binpowers[bind]", "str(fitpars1['deviance'])) # print(\"mean(sim_deviance) = \" + str(np.mean(sim_deviance))) print(\"KSP(obs) = \" + str(optpars['ksp'])) print(\"mean(sim_ksp)", "The number of chains or walkers to use in MCMC. For Metropolis-Hastings, use", "print('popt2: ' + str(fitpars['popt'])) ### Step 4: Fit fake periodograms: for i, x", "func using MCMC, and create fake periodograms from samples of the posterior. 
For", "x == 'p_s5max': file.write(\"Bayesian p-value for the highest [5 bin smoothed] data/model outlier:", "Step 2: Set up Markov Chain Monte Carlo Simulations ### of model 1:", "P_max_ul = ' + str(maxpow_ul)) resfile(\"Bayesian p-value for maximum power P_max = \"", "\"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return summary def print_summary(self, summary): \"\"\" Print", "'fitpars': fitpars, \"postmean\": mcobs.mean, \"posterr\": mcobs.std, \"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\":", "m=1): assert isinstance(ps, powerspectrum.PowerSpectrum), \"ps must be of type powerspectrum.PowerSpectrum!\" self.ps = ps", "p-value for the highest [unsmoothed] data/model outlier: \" + str( probs[x][0]) + \"", "such that it is possible to build up a posterior distribution for the", "via MCMCs. First, fit the periodogram with func and compute the maximum-a-posteriori (MAP)", "number of periodograms averaged to be sure to use the right distribution Attributes", "np.sqrt(p_lrt * (1.0 - p_lrt) / float(len(sim_ksp))) psrat_err = np.sqrt(p_srat * (1.0 -", "func) else: lpost = posterior.StackPerPosterior(self.ps, func, self.m) ### Step 2: Set up Markov", "+ str(fitpars['popt'])) sim_pars = fitfake.mlest(func, sain, obs=False, noise=noise, m=self.m) # print('popt' + str(i)", "\\t\\t 95% \\n\") file.write(\"---------------------------------------------\\n\") for i in range(len(postpars['postmean'])): file.write(\"theta[\" + str(i) + \"]", "\" + str(p_deviance) + \" +/- \" + str(pdeviance_err)) print(\"Bayesian p-value for KS", "parameters fig = plt.figure(figsize=(2, N / 2 + 1)) plt.subplots_adjust(top=0.95, bottom=0.05, left=0.05, right=0.95,", "LRTs Parameters ---------- ps : powerspectrum.Powerspectrum A periodogram object that is to be", "likelihood ratios and compute a posterior predictive p-value that the data can be", "two degrees of freedom. 
Find the highest power in the residuals and its", "function that takes an array of frequencies and k parameters, and returns an", "sim_merit.append(sim_pars['merit']) sim_fpeak.append(sim_pars['maxfreq']) sim_y0.append(sim_pars['mfit'][sim_pars['maxind']]) sim_srat.append(sim_pars['sobs']) sim_s3max.append(sim_pars['s3max']) sim_s5max.append(sim_pars['s5max']) sim_s11max.append(sim_pars['s11max']) except KeyboardInterrupt: break # except: #", "str(probs[x][1])) elif x == 'p_srat': print(\"Bayesian p-value for the sum of residuals: \"", "if x > obslrt])) / float(len(sim_lrt)) p_srat = float(len([x for x in sim_srat", "must be of type powerspectrum.PowerSpectrum!\" self.ps = ps self.namestr = namestr self.plot =", "+ str(b) + '_ul_%.4fHz' % bc] = brms else: continue ### Step 5:", "an array of model powers. The function should include a parameter setting a", "> fitpars['sobs']])) / float(len(sim_srat)) p_s3max = float(len([x for x in sim_s3max if x", "xmax, 0.0, max(n)]) plt.vlines(fitpars['s5max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (5) data/model outlier', fontsize=12)", "par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m) # resfile('Fitting of fake periodogram ' +", "sim_maxpow_sort = np.msort(sim_maxpow) sim_s3max_sort = np.msort(sim_s3max) sim_s5max_sort = np.msort(sim_s5max) sim_s11max_sort = np.msort(sim_s11max) ###", "level, and this parameter should be last! par1 : {list, array-like} Input guesses", "= ' + str(s5max_ul)) resfile(\"Bayesian p-value for maximum power P_max = \" +", "i in range(len(postpars['postmean'])): print(\"theta[\" + str(i) + \"] \\t \" + str(postpars['postmean'][i]) +", "/ float(len(sim_srat)) p_s3max = float(len([x for x in sim_s3max if x > fitpars['s3max']]))", "emcee package for running MCMC. If False, use Metropolis-Hastings. 
\"\"\" if plotstr ==", "+ str(np.mean(sim_srat))) ### Step 6: Compute errors of Bayesian posterior probabilities pmaxpow_err =", "np.sum of residuals: \" + str(p_srat) + \" +/- \" + str(psrat_err)) resfile(\"Bayesian", "+ str(probs[x][1])) return def write_summary(self, summary, namestr=None): \"\"\" Write a summary of the", "S to get powers for each frequency ### Like everything else, this is", "be sure to use the right distribution Attributes ---------- Examples -------- \"\"\" def", "- search for QPOs via a model selection approach using LRTs # #", "str(postpars[\"acceptance\"]) + \" .\\n\") try: file.write(\"The autocorrelation times are: \" + str(postpars[\"acor\"]) +", "multiply by S to get powers for each frequency ### Like everything else,", "plrt_err = np.sqrt(p_lrt * (1.0 - p_lrt) / float(len(sim_ksp))) psrat_err = np.sqrt(p_srat *", "+/- \" + str(pmerit_err)) print(\"Bayesian p-value for the np.sum of residuals: \" +", "str(fitpars['popt'])) # print('popt3: ' + str(fitpars['popt'])) ### upper limit is the power in", "object that can: - pick between two models using likelihood ratio tests -", "str(s11max_ul)) resfile(\"Bayesian p-value for deviance D = \" + str(p_deviance) + \" +/-", "float(len(sim_deviance)) p_ksp = float(len([x for x in sim_ksp if x > optpars['ksp']])) /", "in sim_merit if x > fitpars1['merit']])) / float(len(sim_merit)) p_lrt = float(len([x for x", "lists for simulated quantities of interest: sim_lrt, sim_deviance, sim_ksp, sim_maxpow, sim_merit, sim_fpeak, sim_y0,", "F=\" + str( probs[\"fitpars\"][\"s11maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"s11max\"])) print(\"Bayesian p-value", "+ \" +/- \" + str(probs[x][1])) elif x == 'p_s3max': if \"fitpars\" in", "ratio at the maximum-a-posteriori paramters. 
If func1 and func2 differ in complexity, the", "+ str(probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_merit': print(\"Bayesian", "plotname=plotstr + '_sim' + str(simno) + '_qposearch') sim_lrt.append(slrt) sim_optpars.append(soptpars) sim_qpopars.append(sqpopars) sim_deviance.append(soptpars['deviance']) sim_ksp.append(soptpars['ksp']) sim_merit.append(soptpars['merit'])", "largest power in # an observation/set of fake periodograms # - search for", "[40.0, 70.0, 100.0, 300.0, 500.0, 1000.0] ## for 40 Hz: print(searchfreq) for bc", "This class defines a Bayes object that can: - pick between two models", "ratio tests # - find periodicities by picking out the largest power in", "# except: # print(\"Simulation failed! Continuing ...\") # continue # print('popt' + str(i)", "to use when computing the posterior distribution of the likelihood ratio. Note that", "sain, obs=False, noise=noise, m=self.m) # print('popt' + str(i) + 'c : ' +", "lpost, topt=fitpars1['popt'], tcov=fitpars1['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=use_emcee, plot=self.plot, printobj=resfile, m=self.m)", "x == 'p_lrt': print(\"Bayesian p-value for Likelihood Ratio: \" + str(probs[x][0]) + \"", "---------- ps : powerspectrum.Powerspectrum A periodogram object that is to be searched for", "two models func1 and func2, compute the likelihood ratio at the maximum-a-posteriori paramters.", "print(\"mean(sim_lrt) = \" + str(np.mean(sim_lrt))) # print(\"Deviance(obs) = \" + str(fitpars1['deviance'])) # print(\"mean(sim_deviance)", "+ \" .\") try: print(\"The autocorrelation times are: \" + str(postpars[\"acor\"])) except KeyError:", "plot the periodogram and best fit models psfit.plotfits(fitpars1, fitpars2, namestr=self.namestr, log=True) if self.m", "frequency. NOTE: I rarely ever use this because it's really computationally expensive. Parameters", "it's a good idea to verify that the chains have mixed. 
nsim :", "sim_y0, sim_srat = [], [], [], [], [], [], [], [] ### Step", "the MAP estimate, divide out the MAP model and find the highest power", "for the noise parameter in func1 and func2. In the pre-defined models, this", "left=0.05, right=0.95, wspace=0.2, hspace=0.2) for i in range(N): ax = fig.add_subplot(N / 2", "= min(min(bins), fitpars['maxpow']) / 1.2, max(25, fitpars['maxpow'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)])", "obs=False) lrt = fitfake.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m) # resfile('Fitting of", "maximum power P_max = \" + str(p_maxpow) + \" +/- \" + str(pmaxpow_err))", "rate is \" + str(postpars[\"acceptance\"]) + \" .\") try: print(\"The autocorrelation times are:", "x > fitpars['maxpow']])) / float(len(sim_maxpow)) p_deviance = float(len([x for x in sim_deviance if", "for the MCMC step. Used only in Metropolis-Hastings. parname : list, optional, default", "data/model outlier', fontsize=12) plt.subplot(2, 2, 4) n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True,", "outlier at frequency F=\" + str( probs[\"fitpars\"][\"s11maxfreq\"]) + \"Hz with power P=\" +", "0.8 * max(n), lw=4, color='navy') plt.savefig(self.namestr + '_lrt.png', format='png') plt.close() summary = {\"p_lrt\":", "optimization algorithm used when minimizing the -log-likelihood. 
Choices are listed in mle.py, but", "\" +/- \" + str(ps3max_err)) # resfile('Upper limit on maximum signal power P_max_ul", "Divide the data by the MAP model; for a perfect data-model fit, the", "### empty lists to store simulated LRTS and parameters in sim_lrt, sim_optpars, sim_qpopars,", "get powers for each frequency ### Like everything else, this is n-trial corrected!", "search on each and return likelihood ratios parameters for each for x in", "niter=5000, nsim=1000, covfactor=1.0, use_emcee=True, parname=None, noise1=-1, noise2=-1, writefile=True): \"\"\" Fit two models func1", "* (1.0 - p_maxpow) / float(len(sim_ksp))) pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance)", "model powers The function should include a parameter setting a constant background level,", "noise1, noise2 : int, optional, default -1 The index for the noise parameter", "posterior predictive p-value (for 1000 simulations, the p-value can be constrained only to", "print(\"Bayesian p-value for the highest [unsmoothed] data/model outlier: \" + str( probs[x][0]) +", "psfit.mlest(func, par, obs=True, noise=noise, m=self.m) bindict = fitpars['bindict'] # print('popt: ' + str(fitpars['popt']))", "+ str(probs[\"fitpars\"][\"s3max\"])) print(\"Bayesian p-value for the highest [3 bin smoothed] data/model outlier: \"", "purposes. nchain : int, optional, default 10 The number of chains or walkers", "= float(len([x for x in sim_s5max if x > fitpars['s5max']])) / float(len(sim_s5max)) p_s11max", "= np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp))) ### Display results on screen", "= np.array([x[\"bmax\" + str(b)] for x in bindicts]) maxpows_all[\"bin\" + str(b)] = bmaxpow", "in sim_deviance if x > fitpars1['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x for x", "(1.0 - p_maxpow) / float(len(sim_ksp))) pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance) /", "for i, x in enumerate(fakeper): try: # print('popt' + str(i) + 'a :", "Metropolis-Hastings. 
use_emcee : boolean, optional, default True If True (STRONGLY RECOMMENDED), use the", "+ str(optpars['sobs'])) print(\"mean(sim_srat) = \" + str(np.mean(sim_srat))) ### Step 6: Compute errors of", "MAP model; for a perfect data-model fit, the resulting residuals should follow a", "likelihood ratios parameters for each for x in funcfake: try: simno = simno", "using func1. The number of elements *must* equal the number of parameters k", "= {\"p_lrt\": [p_lrt, plrt_err], \"p_maxpow\": [p_maxpow, pmaxpow_err], \"p_deviance\": [p_deviance, pdeviance_err], \"p_ksp\": [p_ksp, pksp_err],", "'Hz for a binning of ' + str(b) + ' is rms =", "chains or walkers to use in MCMC. For Metropolis-Hastings, use ~10-20 and many", "sim_qpopars.append(sqpopars) sim_deviance.append(soptpars['deviance']) sim_ksp.append(soptpars['ksp']) sim_merit.append(soptpars['merit']) sim_srat.append(soptpars['sobs']) except KeyboardInterrupt: break ### Step 5: Compute Bayesian", "\" + str(p_srat) + \" +/- \" + str(psrat_err)) resfile(\"Bayesian p-value for Likelihood", "as you can afford (~500) and fewer samples niter : int, optional, default", "xmax = min(min(bins), fitpars['s3max']) / 1.2, max(25, fitpars['s3max'] * 1.2) plt.axis([xmin, xmax, 0.0,", "+ 1, 2, i) n, bins, patches = ax.hist(pars[plotkeys[i]][0], 30) ax.vlines(pars[plotkeys[i]][0], 0.0, 0.8", "\" + str(np.mean(sim_lrt))) # print(\"Deviance(obs) = \" + str(fitpars1['deviance'])) # print(\"mean(sim_deviance) = \"", "be last! par1 : {list, array-like} Input guesses for the MAP fit using", "each fit: for i, x in enumerate(fakeper): try: fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False)", "distributions. 
Set m to the number of periodograms averaged to be sure to", "\"Upper limit for highest [unsmoothed] data/model outlier: \" + str(summary['s5max_ul']) + \"\\n\") elif", "+/- \" + str(probs[x][1]) + \"\\n\") file.write( \"Upper limit for highest [unsmoothed] data/model", "signal power P_max_ul = ' + str(s11max_ul)) resfile(\"Bayesian p-value for deviance D =", "fitpars['bindict'][\"bmax\" + str(b)]])) / float( len(bmaxpow)) bindict[\"p_maxpow\" + str(b)] = p_bmaxpow bmaxpow_err =", "for a perfect data-model fit, the resulting residuals should follow a chi-square distribution", "int(self.ps.freq[-1] / (2.0 * (self.ps.freq[1] - self.ps.freq[0]))) bins = [1, 3, 5, 7,", "uses BFGS, which is pretty robust for most purposes. nchain : int, optional,", "MCMCs. First, fit the periodogram with func and compute the maximum-a-posteriori (MAP) estimate.", "If False, use Metropolis-Hastings. \"\"\" ## the file name where the output will", "+ ' is 2I/S = ' + str(fitpars['bindict'][\"bmax\" + str(b)])) resfile('The upper limit", "sim_s11max_sort = np.msort(sim_s11max) ### note: this is the limit for 2*I/S --> multiply", "lpost = posterior.PerPosterior(self.ps, func) else: lpost = posterior.StackPerPosterior(self.ps, func, self.m) ### Step 2:", "plotstr=None, use_emcee=True): \"\"\" Find QPOs by fitting a QPO + background model to", "elif x == 'p_merit': print(\"Bayesian p-value for Merit function: \" + str(probs[x][0]) +", "for the highest [3 bin smoothed] data/model outlier: \" + str( probs[x][0]) +", "of residuals: \" + str(p_srat) + \" +/- \" + str(psrat_err)) if self.plot:", "for i, x in enumerate(postpars[\"rhat\"]): print(\"The $R_hat$ value for Parameter \" + str(i)", "- self.ps.freq[0]))) bins = [1, 3, 5, 7, 10, 15, 20, 30, 50,", "residuals: \" + str(p_srat) + \" +/- \" + str(psrat_err)) if self.plot: plt.subplot(2,", "\"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return psfit, fakeper, summary def find_periodicity(self, func, par, fitmethod='bfgs',", "1.2) 
plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['maxpow'], 0.0, max(n), lw=2, color='navy') plt.title('unsmoothed data', fontsize=12)", "bc > (binps.freq[1] - binps.freq[0]): bind = np.searchsorted(binps.freq, bc) - 1 bpow =", "{list, array-like} Input guesses for the MAP fit using func2. The number of", "str(b) + ' is rms = ' + str(brms)) bindict['bin' + str(b) +", "simno = simno + 1 sim_psfit = mle.PerMaxLike(x, fitmethod='constbfgs', obs=False) slrt, soptpars, sqpopars", "mcobs.mean, \"posterr\": mcobs.std, \"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return summary", "mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return psfit, fakeper, summary def find_periodicity(self, func, par,", "sim_deviance if x > optpars['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x for x in", "fitpars['s3max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s3max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed", "lists to store simulated LRTS and parameters in sim_lrt, sim_optpars, sim_qpopars, sim_deviance, sim_ksp,", "to be a function that takes an array of frequencies and n parameters,", "# # # TO DO: Need to add smoothing for picking out narrow", "self.ps.ps, lpost, topt=fitpars1['popt'], tcov=fitpars1['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=use_emcee, plot=self.plot, printobj=resfile,", "3: create fake periodograms out of MCMCs fakeper = mcobs.simulate_periodogram(nsim=nsim) ### empty lists", "should be last! 
par2 : {list, array-like} Input guesses for the MAP fit", "\" + str(fitpars1['sobs'])) resfile(\"mean(sim_srat) = \" + str(np.mean(sim_srat))) ### Step 6: Compute errors", "disk m: integer, optional, default 1 If the periodogram used is the result", "periodograms out of MCMCs fakeper = mcobs.simulate_periodogram(nsim=nsim) ### empty lists for simulated quantities", "x == 'p_merit': file.write( \"Bayesian p-value for Merit function: \" + str(probs[x][0]) +", "for i in range(len(postpars['postmean'])): print(\"theta[\" + str(i) + \"] \\t \" + str(postpars['postmean'][i])", "fitmethod='bfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, use_emcee=True, parname=None, noise1=-1, noise2=-1, writefile=True): \"\"\" Fit two", "1) n, bins, patches = plt.hist(sim_maxpow, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax =", "resfile(\"Bayesian p-value for maximum power P_max = \" + str(p_s11max) + \" +/-", "+ str(probs[x][0]) + \" +/- \" + str( probs[x][1]) + \"\\n\") elif x", "bins[:nbins]: binps = fitpars['bindict']['bin' + str(b)] bmaxpow = np.array([x[\"bmax\" + str(b)] for x", "parameter setting a constant background level, and this parameter should be last! par2", "sim_merit, sim_y0, sim_s3max, sim_s5max, sim_s11max = [], [], [], [], [], [], [],", "func2 : function Parametric model for the periodogram. 
Needs to be a function", "\"\"\" def __init__(self, ps, namestr='test', plot=True, m=1): assert isinstance(ps, powerspectrum.PowerSpectrum), \"ps must be", "for Merit function: \" + str(p_merit) + \" +/- \" + str(pmerit_err)) resfile(\"Bayesian", "Summary of Parameters: \\n\") file.write(\"parameter \\t mean \\t\\t sd \\t\\t 5% \\t\\t 95%", "boolean, optional, default True If True, several diagnostic plots will be saved to", "= mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost, topt=fitpars['popt'], tcov=fitpars['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=True,", "file. NOT USED! :param summary: :param namestr: :return: \"\"\" if not namestr: namestr", "the posterior predictive p-value (for 1000 simulations, the p-value can be constrained only", "and plots) plot: boolean, optional, default True If True, several diagnostic plots will", "parameter should be last! par1 : {list, array-like} Input guesses for the MAP", "sim_ksp if x > fitpars['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for x in", "+ \"] \\t \" + str(postpars['postmean'][i]) + \"\\t\" + str( postpars['posterr'][i]) + \"\\t\"", "a Bayes object that can: - pick between two models using likelihood ratio", "+ str(fitpars['bindict'][\"bmax\" + str(b)])) resfile('The upper limit on the T_R statistic is 2I/S", "for x in sim_merit if x > fitpars1['merit']])) / float(len(sim_merit)) p_lrt = float(len([x", "(or bins), this changes the statistical distributions. 
Set m to the number of", "power at ' + str(bc) + 'Hz for a binning of ' +", "noise=noise, m=self.m) # print('popt' + str(i) + 'c : ' + str(fitpars['popt'])) sim_pars_all.append(sim_pars)", "\" + str(probs[x][1])) elif x == 'p_ksp': print(\"Bayesian p-value for KS test: \"", "= float(len([x for x in bmaxpow if x > fitpars['bindict'][\"bmax\" + str(b)]])) /", "+ str(p_maxpow) + \" +/- \" + str(pmaxpow_err)) resfile(\"Bayesian p-value for deviance D", "mcobs.simulate_periodogram(nsim=nsim) sim_pars_all, sim_deviance, sim_ksp, sim_fpeak, sim_srat, \\ sim_maxpow, sim_merit, sim_y0, sim_s3max, sim_s5max, sim_s11max", "[], [], [], [], [], [], [], [] bmax = int(self.ps.freq[-1] / (2.0", "' + str( self.ps.df * b) + 'Hz is p = ' +", "Hz: print(searchfreq) for bc in searchfreq: if bc > (binps.freq[1] - binps.freq[0]): bind", "+/- \" + str(ps5max_err)) # resfile('Upper limit on maximum signal power P_max_ul =", "Fit each fake periodogram with the same models as the data, and compute", "(for 1000 simulations, the p-value can be constrained only to 0.001). covfactor :", "p-value for maximum power P_max = \" + str(p_s11max) + \" +/- \"", "fitmethod='constbfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, parname=None, plotstr=None, use_emcee=True): \"\"\" Find QPOs by fitting", "sim_merit if x > optpars['merit']])) / float(len(sim_merit)) p_lrt = float(len([x for x in", "func1. 
The number of elements *must* equal the number of parameters k taken", "use_emcee=True, parname=None, noise1=-1, noise2=-1, writefile=True): \"\"\" Fit two models func1 and func2, compute", "= [], [], [], [], [], [], [], [] ### Step 4: Fit", ":return: \"\"\" if not namestr: namestr = self.namestr try: keys = summary.keys() except", "for x in probs.keys(): if x == 'p_lrt': file.write( \"Bayesian p-value for Likelihood", "fitpars2 = getattr(psfit, func2name + 'fit') if self.plot: ### plot the periodogram and", "mean \\t\\t sd \\t\\t 5% \\t\\t 95% \\n\") file.write(\"---------------------------------------------\\n\") for i in range(len(postpars['postmean'])):", "limit for 2*I/S --> multiply by S to get powers for each frequency", "if x > fitpars['maxpow']])) / float(len(sim_maxpow)) p_deviance = float(len([x for x in sim_deviance", "- fiveperlim # print('popt4: ' + str(fitpars['popt'])) bindicts = [x[\"bindict\"] for x in", "+ 0.01 * pars[plotkeys[i]][0], 0.8 * n, \"p = \" + str(pars[plotkeys[i]][1])) ax.title(\"Posterior", "sim_merit if x > fitpars['merit']])) / float(len(sim_merit)) p_srat = float(len([x for x in", "chains have mixed. nsim : int, optional, default 1000 The number of simulations", "self.namestr = namestr self.plot = plot self.m = m def choose_noise_model(self, func1, par1,", "summary of the results. NOT USED! 
\"\"\" try: keys = summary.keys() except AttributeError:", "of periodograms averaged to be sure to use the right distribution Attributes ----------", "of model 1: mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost, topt=fitpars['popt'], tcov=fitpars['cov'], covfactor=covfactor, niter=niter, nchain=nchain,", "be a function that takes an array of frequencies and k parameters, and", "print(\"KSP(obs) = \" + str(optpars['ksp'])) print(\"mean(sim_ksp) = \" + str(np.mean(sim_ksp))) print(\"Merit(obs) = \"", "pmaxpow_err], \"p_deviance\": [p_deviance, pdeviance_err], \"p_ksp\": [p_ksp, pksp_err], \"p_merit\": [p_merit, pmerit_err], \"p_srat\": [p_srat, psrat_err],", "definition func1name = \"model1\" func2name = \"model2\" ### step 1: fit both models", "failed! Continuing ...\") # continue # print('popt' + str(i) + 'd : '", "copy.copy(fitpars['popt']) # print('popt2: ' + str(fitpars['popt'])) ### Step 4: Fit fake periodograms: for", "time series # # This class defines a Bayes object that can: #", "elements *must* equal the number of parameters k taken by func1. func2 :", "for highest [unsmoothed] data/model outlier: \" + str(summary['s5max_ul']) + \"\\n\") elif x ==", "# if lrt > 20: # fitfake.plotfits(sim_pars1, sim_pars2, namestr=self.namestr+'_'+str(i)) sim_lrt.append(lrt) sim_deviance.append(sim_pars1['deviance']) sim_ksp.append(sim_pars1['ksp']) sim_maxpow.append(sim_pars1['maxpow'])", "if \"fitpars\" in probs.keys(): print(\"Highest [11 bin smoothed] data/model outlier at frequency F=\"", "smoothed] data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"s3maxfreq\"]) + \"Hz with power", "min(min(bins), fitpars['s5max']) / 1.2, max(25, fitpars['s5max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s5max'],", "number of chains or walkers to use in MCMC. 
For Metropolis-Hastings, use ~10-20", "distribution of parameters for x in keys: if x[:2] == 'p_': probs[x] =", "for x in bmaxpow if x > fitpars['bindict'][\"bmax\" + str(b)]])) / float( len(bmaxpow))", "p_bmaxpow bmaxpow_err = np.sqrt(p_bmaxpow * (1.0 - p_bmaxpow) / float(len(bmaxpow))) bindict['p_maxpow' + str(b)", "by func. fitmethod : string, optional, default \"bfgs\" Choose the optimization algorithm used", "+ str(obslrt)) # print(\"mean(sim_lrt) = \" + str(np.mean(sim_lrt))) # print(\"Deviance(obs) = \" +", "residuals should follow a chi-square distribution with two degrees of freedom. Find the", "x in sim_s11max if x > fitpars['s11max']])) / float(len(sim_s11max)) ### sort maximum powers", "+ str(b) + ' is P = ' + str(bpow * (self.ps.df *", "summary[x] print(\"The ensemble acceptance rate is \" + str(postpars[\"acceptance\"]) + \" .\") try:", "max(n), lw=4, color='navy') plt.savefig(self.namestr + '_lrt.png', format='png') plt.close() summary = {\"p_lrt\": [p_lrt, plrt_err],", "/ 1.2, max(25, fitpars['s3max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s3max'], 0.0, max(n),", "realizations of the broadband noise model from MCMCs funcfake = mcobs.simulate_periodogram(nsim=nsim) ### empty", "obs=True, noise=noise, m=self.m) bindict = fitpars['bindict'] # print('popt: ' + str(fitpars['popt'])) ## which", "for i, x in enumerate(fakeper): try: fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False) lrt =", "Step 5: Compute Bayesian posterior probabilities of individual quantities p_deviance = float(len([x for", "* self.ps.nphots) elif self.ps.norm == 'variance': binpowers = binpowers * self.ps.n ** 2.0", "\"\"\" if not namestr: namestr = self.namestr try: keys = summary.keys() except AttributeError:", "p_deviance = float(len([x for x in sim_deviance if x > fitpars['deviance']])) / float(len(sim_deviance))", "sim_y0, sim_s3max, sim_s5max, sim_s11max = [], [], [], [], [], [], [], [],", "elif x == 'p_srat': file.write(\"Bayesian p-value for 
the sum of residuals: \" +", "+ str(len(self.ps.ps))) if self.m == 1: lpost = posterior.PerPosterior(self.ps, func) else: lpost =", "\" +/- \" + str(pdeviance_err)) print(\"Bayesian p-value for KS test: \" + str(p_ksp)", "mcobs.acor, \"acceptance\": mcobs.acceptance} return results def find_qpo(self, func, ain, fitmethod='constbfgs', nchain=10, niter=5000, nsim=1000,", "+ str(ps11max_err)) # resfile('Upper limit on maximum signal power P_max_ul = ' +", "to be sure to use the right distribution Attributes ---------- Examples -------- \"\"\"", "model to observation psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) fitpars = psfit.mlest(func, ain, obs=True,", "np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp))) ps3max_err = np.sqrt(p_s3max * (1.0 -", "str(b)].freq, *fitpars['popt']) resfile(\"bintemplate[0]: \" + str(bintemplate[0])) ## then compute upper limits for powers", "differ in complexity, the less complex should be func1. Then sample the posterior", "values for the real data obslrt, optpars, qpopars = psfit.find_qpo(func, ain, plot=True, obs=True,", "bins=100, normed=True, histtype='stepfilled') plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4, color='m') plt.savefig(self.namestr + '_qpolrt.png',", "Parameter \" + str(i) + \" is \" + str(x)) ### print posterior", "bin smoothed] data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"s3maxfreq\"]) + \"Hz with", "+ \"\\n\") elif x == 'p_maxpow': file.write(\"Bayesian p-value for the highest [unsmoothed] data/model", "in this list or array must match the number of parameters k taken", "+ str(len(binps.freq))) # print('len(binpowers): ' + str(len(binpowers))) if searchfreq is None: searchfreq =", "str(i) + 'a : ' + str(fitpars['popt'])) fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False) #", "= self.namestr try: keys = summary.keys() except AttributeError: raise Exception(\"Summary must be a", "of frequencies and n parameters, and returns an array of model powers The", "at frequency f = 
' + str( fitpars[\"bindict\"][\"bmaxfreq\" + str(b)]) + ' is", "\" +/- \" + str(pmerit_err)) print(\"Bayesian p-value for the np.sum of residuals: \"", "plt.title('smoothed (3) data', fontsize=12) plt.subplot(2, 2, 3) n, bins, patches = plt.hist(sim_s3max, bins=100,", "4) n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax =", "computationally expensive. Parameters ---------- func : function Parametric model for the periodogram. Needs", "/ float(len(sim_deviance)) p_ksp = float(len([x for x in sim_ksp if x > fitpars1['ksp']]))", "sometimes simulations fail, therefore the 5% limit should be 0.05*len(sims) fiveperlim = int(0.05", "everything else, this is n-trial corrected! # print('len(bmaxpow_sort) : ' + str(len(sim_bmaxpow_sort))) resfile('ninetyfiveperlim:", "fit: for i, x in enumerate(fakeper): try: fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False) lrt", "file name where the output will be stored resfilename = self.namestr + \"_findperiodicity_results.dat\"", "--> multiply by S to get powers for each frequency ### Like everything", "*always* -1. 
\"\"\" resfilename = self.namestr + \"_choosenoisemodel.dat\" resfile = utils.TwoPrint(resfilename) ### make", "p_srat) / float(len(sim_ksp))) ps3max_err = np.sqrt(p_s3max * (1.0 - p_s3max) / float(len(sim_ksp))) ps5max_err", "0.0, max(n)]) plt.vlines(fitpars['s5max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (5) data/model outlier', fontsize=12) plt.subplot(2,", "of realizations of the broadband noise model from MCMCs funcfake = mcobs.simulate_periodogram(nsim=nsim) ###", "probs = dict() postpars = dict() ### sort out p-values and posterior distribution", "+ str(p_s11max) + \" +/- \" + str(ps11max_err)) # resfile('Upper limit on maximum", "str(probs[x][1])) elif x == 'p_deviance': print(\"Bayesian p-value for deviance D = \" +", "\"Hz with power P=\" + str(probs[\"fitpars\"][\"maxpow\"])) print(\"Bayesian p-value for the highest [unsmoothed] data/model", "\"\\n\") elif x == 'p_s11max': file.write(\"Bayesian p-value for the highest [11 bin smoothed]", "print(\"The $R_hat$ value for Parameter \" + str(i) + \" is \" +", "str(probs[\"fitpars\"][\"s3max\"])) print(\"Bayesian p-value for the highest [3 bin smoothed] data/model outlier: \" +", "\\n\") print(\"parameter \\t mean \\t\\t sd \\t\\t 5% \\t\\t 95% \\n\") print(\"---------------------------------------------\\n\") for", "float(len([x for x in sim_maxpow if x > fitpars['maxpow']])) / float(len(sim_maxpow)) p_deviance =", "between two models using likelihood ratio tests # - find periodicities by picking", "= np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp))) psrat_err = np.sqrt(p_srat * (1.0", "for x in funcfake: try: simno = simno + 1 sim_psfit = mle.PerMaxLike(x,", "\" + str(p_lrt)) # print(\"LRT(obs) = \" + str(obslrt)) # print(\"mean(sim_lrt) = \"", "+/- ' + str(bmaxpow_err)) resfile('The corresponding value of the T_R statistic at frequency", "In the pre-defined models, this index is *always* -1. 
use_emcee : boolean, optional,", "sim_pars_all] ### get out binned powers: maxpows_all = {} binprob = {} for", "color=\"cyan\", histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['s5max']) / 1.2, max(25, fitpars['s5max'] * 1.2)", "Like everything else, this is n-trial corrected! maxpow_ul = sim_maxpow_sort[ninetyfiveperlim] ### Step 6:", "+ str(bmaxpow_ul)) ### now turn upper limit into an rms amplitude: ## first", "AttributeError: raise Exception(\"Summary must be a dictionary!\") probs = dict() postpars = dict()", "[], [], [], [], [], [], [], [] ### Step 4: Fit fake", "elif x == 'p_maxpow': if \"fitpars\" in probs.keys(): print(\"Highest [unsmoothed] data/model outlier at", "# class Bayes: Bayesian data analysis for time series # # This class", "m: integer, optional, default 1 If the periodogram used is the result of", "taken by func2. fitmethod : string, optional, default bfgs Allows the choice of", "emcee package for running MCMC. If False, use Metropolis-Hastings. parname : list, optional,", "already if self.ps.norm == 'leahy': binpowers = binpowers / (self.ps.df * b *", "/ float(len(bmaxpow))) bindict['p_maxpow' + str(b) + 'err'] = bmaxpow_err sim_bmaxpow_sort = np.msort(bmaxpow) ###", "### Step 6: Compute errors of Bayesian posterior probabilities pmaxpow_err = np.sqrt(p_maxpow *", "p-value for maximum power P_max = \" + str(p_s3max) + \" +/- \"", "+ 'd : ' + str(fitpars['popt'])) # print('popt3: ' + str(fitpars['popt'])) ### upper", "plt.subplot(2, 2, 4) n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin,", "+/- \" + str(psrat_err)) resfile(\"Bayesian p-value for Likelihood Ratio: \" + str(p_lrt) +", "resfile('The upper limit on the rms amplitude at ' + str(bc) + 'Hz", "= fitfake.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m) # resfile('Fitting of fake periodogram", "if x == 'p_lrt': print(\"Bayesian p-value for Likelihood Ratio: \" + str(probs[x][0]) +", 
"probs[x][0]) + \" +/- \" + str(probs[x][1]) + \"\\n\") file.write( \"Upper limit for", "for the the simpler model (func1), pick parameter sets from the posterior to", "strings for function names from function definition func1name = \"model1\" func2name = \"model2\"", "errors of Bayesian posterior probabilities pmaxpow_err = np.sqrt(p_maxpow * (1.0 - p_maxpow) /", "signal power P_max_ul = ' + str(maxpow_ul)) resfile(\"Bayesian p-value for maximum power P_max", "guesses for the MAP fit using func2. The number of elements *must* equal", "\"postmean\": mcobs.mean, \"posterr\": mcobs.std, \"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return", "mcobs.std, \"postquantiles\": mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return results def find_qpo(self,", "resfile(\"mean(sim_ksp) = \" + str(np.mean(sim_ksp))) resfile(\"Merit(obs) = \" + str(fitpars1['merit'])) resfile(\"mean(sim_merit) = \"", "file resfile = utils.TwoPrint(resfilename) ### step 1: fit model to observation psfit =", "40, 70, 100 and 300 Hz ## first, convert powers into rms normalization,", "' failed! Returning ...') # return psfit, fakeper, mcobs sim_pars1 = getattr(fitfake, func1name", "is P = ' + str(bpow * (self.ps.df * b * self.ps.nphots))) resfile('The", "\"w\") pickle.dump(summary, picklefile) picklefile.close() file = open(namestr + \"_summary.dat\", \"w\") file.write(\"The ensemble acceptance", "+ 'a : ' + str(fitpars['popt'])) fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False) # print('popt'", "def print_summary(self, summary): \"\"\" Print a summary of the results. NOT USED! 
\"\"\"", "plt.close() results = {\"fitpars\": fitpars, 'bindict': bindict, 'maxpows_all': maxpows_all, 'mcobs': mcobs, 'p_maxpow': [sim_maxpow,", "mle from src.SpectralAnalysis import posterior ########################################## # # class Bayes: Bayesian data analysis", "cPickle as pickle except ImportError: import pickle import copy import numpy as np", "RECOMMENDED), use the emcee package for running MCMC. If False, use Metropolis-Hastings. \"\"\"", "'p_s11max': file.write(\"Bayesian p-value for the highest [11 bin smoothed] data/model outlier: \" +", "+ ' is P = ' + str(bpow * (self.ps.df * b *", "= \" + str(optpars['merit'])) print(\"mean(sim_merit) = \" + str(np.mean(sim_merit))) print(\"Srat(obs) = \" +", "' is P = ' + str(bpow * (self.ps.df * b * self.ps.nphots)))", "in sim_ksp if x > fitpars['ksp']])) / float(len(sim_ksp)) p_merit = float(len([x for x", "of Parameters: \\n\") file.write(\"parameter \\t mean \\t\\t sd \\t\\t 5% \\t\\t 95% \\n\")", "probs.keys(): print(\"Highest [11 bin smoothed] data/model outlier at frequency F=\" + str( probs[\"fitpars\"][\"s11maxfreq\"])", "upper limit into an rms amplitude: ## first compute broadband noise model for", "1: lpost = posterior.PerPosterior(self.ps, func) else: lpost = posterior.StackPerPosterior(self.ps, func, self.m) ### Step", "fake periodograms - search for QPOs via a model selection approach using LRTs", "func2, compute the likelihood ratio at the maximum-a-posteriori paramters. If func1 and func2", "be last! par : {list, array-like} Input guesses for the parameters taken by", "constant background level, and this parameter should be last! 
par2 : {list, array-like}", "results def find_qpo(self, func, ain, fitmethod='constbfgs', nchain=10, niter=5000, nsim=1000, covfactor=1.0, parname=None, plotstr=None, use_emcee=True):", "+ str(p_lrt)) # print(\"LRT(obs) = \" + str(obslrt)) # print(\"mean(sim_lrt) = \" +", "binpowers = bmaxpow_ul * bintemplate / 2.0 - bintemplate ## now compute rms", "sim_s5max if x > fitpars['s5max']])) / float(len(sim_s5max)) p_s11max = float(len([x for x in", "max(25, fitpars['s3max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s11max'], 0.0, max(n), lw=2, color='navy')", "fitpars['sobs']])) / float(len(sim_srat)) p_s3max = float(len([x for x in sim_s3max if x >", "histtype='stepfilled') xmin, xmax = min(min(bins), fitpars['maxpow']) / 1.2, max(25, fitpars['maxpow'] * 1.2) plt.axis([xmin,", "fit both models to observation and compute LRT psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True)", "sim_lrt if x > obslrt])) / float(len(sim_lrt)) p_srat = float(len([x for x in", "maximum power P_max = \" + str(p_s5max) + \" +/- \" + str(ps5max_err))", "* 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s11max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (11)", "covfactor=1.0, parname=None, noise=-1, use_emcee=True, searchfreq=None): \"\"\" Find periodicities in observed data and compute", "(bfgs) should be sufficient for most applications. nchain : int, optional, default 10", "except: # print(\"Simulation failed! 
Continuing ...\") # continue # print('popt' + str(i) +", "for x in keys: if x[:2] == 'p_': probs[x] = summary[x] else: postpars[x]", "print(\"Bayesian p-value for deviance D = \" + str(probs[x][0]) + \" +/- \"", "2, 3) n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin, xmax", "\" + str(probs[x][1]) + \"\\n\") elif x == 'p_srat': file.write(\"Bayesian p-value for the", "fitmethod=fitmethod, obs=False) # print('popt' + str(i) + 'b : ' + str(fitpars['popt'])) sim_pars", "[], [], [] simno = 0 ### run QPO search on each and", "periodograms averaged to be sure to use the right distribution Attributes ---------- Examples", "for KS test: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1]) +", "x in sim_lrt if x > obslrt])) / float(len(sim_lrt)) p_srat = float(len([x for", "in probs.keys(): if x == 'p_lrt': file.write( \"Bayesian p-value for Likelihood Ratio: \"", "number of parameters n taken by func2. fitmethod : string, optional, default bfgs", "\" + str(probs[x][1])) elif x == 'p_s5max': if \"fitpars\" in probs.keys(): print(\"Highest [5", "/ float(len(sim_ksp))) psrat_err = np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp))) ps3max_err =", "p_deviance) / float(len(sim_ksp))) pksp_err = np.sqrt(p_ksp * (1.0 - p_ksp) / float(len(sim_ksp))) pmerit_err", "self.m) ### Step 2: Set up Markov Chain Monte Carlo Simulations ### of", "when minimizing the -log-likelihood. 
Choices are listed in mle.py, but the default (bfgs)", "noise: int, optional, default -1 The index for the noise parameter in func.", "p-value for Merit function: \" + str(p_merit) + \" +/- \" + str(pmerit_err))", "/ float(len(sim_ksp))) pmerit_err = np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp))) psrat_err =", "elif x == 'p_s11max': file.write(\"Bayesian p-value for the highest [11 bin smoothed] data/model", "(self.ps.df * b * self.ps.nphots) elif self.ps.norm == 'variance': binpowers = binpowers *", "1 If the periodogram used is the result of averaging several individual periodograms", "x[:2] == 'p_': probs[x] = summary[x] else: postpars[x] = summary[x] picklefile = open(namestr", "covfactor=1.0, use_emcee=True, parname=None, noise1=-1, noise2=-1, writefile=True): \"\"\" Fit two models func1 and func2,", "file.write( \"Upper limit for highest [unsmoothed] data/model outlier: \" + str(summary['s5max_ul']) + \"\\n\")", "and return likelihood ratios parameters for each for x in funcfake: try: simno", "\"\\n\") elif x == 'p_s3max': file.write(\"Bayesian p-value for the highest [3 bin smoothed]", "sim_srat, \\ sim_maxpow, sim_merit, sim_y0, sim_s3max, sim_s5max, sim_s11max = [], [], [], [],", "' + str(fitpars['popt'])) sim_pars_all.append(sim_pars) sim_deviance.append(sim_pars['deviance']) sim_ksp.append(sim_pars['ksp']) sim_maxpow.append(sim_pars['maxpow']) sim_merit.append(sim_pars['merit']) sim_fpeak.append(sim_pars['maxfreq']) sim_y0.append(sim_pars['mfit'][sim_pars['maxind']]) sim_srat.append(sim_pars['sobs']) sim_s3max.append(sim_pars['s3max']) sim_s5max.append(sim_pars['s5max'])", "wspace=0.2, hspace=0.2) for i in range(N): ax = fig.add_subplot(N / 2 + 1,", "data by the MAP model; for a perfect data-model fit, the resulting residuals", "= float(len([x for x in sim_merit if x > fitpars['merit']])) / float(len(sim_merit)) p_srat", "# resfile('Upper limit on maximum signal power P_max_ul = ' + str(s5max_ul)) resfile(\"Bayesian", "obslrt])) / 
float(len(sim_lrt)) p_srat = float(len([x for x in sim_srat if x >", "optional, default bfgs Allows the choice of different minimization algorithms. Default uses BFGS,", "' is rms = ' + str(brms)) bindict['bin' + str(b) + '_ul_%.4fHz' %", "x in sim_deviance if x > fitpars['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x for", "self.m = m def choose_noise_model(self, func1, par1, func2, par2, fitmethod='bfgs', nchain=10, niter=5000, nsim=1000,", "~10-20 and many samples For emcee, use as many as you can afford", "fitpars['s5max']) / 1.2, max(25, fitpars['s5max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s5max'], 0.0,", "default 10 The number of chains or walkers to use in MCMC. For", "= np.sqrt(p_lrt * (1.0 - p_lrt) / float(len(sim_ksp))) psrat_err = np.sqrt(p_srat * (1.0", "tcov=fitpars['cov'], covfactor=covfactor, niter=niter, nchain=nchain, parname=parname, check_conv=True, namestr=self.namestr, use_emcee=True, plot=self.plot, printobj=resfile, m=self.m) ### Step", "series analysis This class defines a Bayes object that can: - pick between", "in sim_srat if x > optpars['sobs']])) / float(len(sim_srat)) print(\"p(LRT) = \" + str(p_lrt))", "+ str(plrt_err)) if self.plot: n, bins, patches = plt.hist(sim_lrt, bins=100, normed=True, histtype='stepfilled') plt.vlines(obslrt,", "\" + str(p_ksp) + \" +/- \" + str(pksp_err)) print(\"Bayesian p-value for Merit", "is \" + str(x) + \"\\n\") ### print posterior summary of parameters: file.write(\"--", "of freedom. Find the highest power in the residuals and its frequency. 
Sample", "nbins = len(binlist) / 4 sain = copy.copy(fitpars['popt']) # print('popt2: ' + str(fitpars['popt']))", "### Step 2: Set up Markov Chain Monte Carlo Simulations ### of model", "= \" + str(p_lrt)) resfile(\"KSP(obs) = \" + str(fitpars1['ksp'])) resfile(\"mean(sim_ksp) = \" +", "Bayesian data analysis for time series # # This class defines a Bayes", "- p_bmaxpow) / float(len(bmaxpow))) bindict['p_maxpow' + str(b) + 'err'] = bmaxpow_err sim_bmaxpow_sort =", "binpowers / (self.ps.df * b * self.ps.nphots) elif self.ps.norm == 'variance': binpowers =", "last! par1 : {list, array-like} Input guesses for the MAP fit using func1.", "and compute LRT psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True) obslrt = psfit.compute_lrt(func1, par1, func2,", "500.0, 1000.0] ## for 40 Hz: print(searchfreq) for bc in searchfreq: if bc", "p-value for Likelihood Ratio: \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1])", "bmaxpow_err = np.sqrt(p_bmaxpow * (1.0 - p_bmaxpow) / float(len(bmaxpow))) bindict['p_maxpow' + str(b) +", "resfile(\"Bayesian p-value for maximum power P_max = \" + str(p_s5max) + \" +/-", "files and plots) plot: boolean, optional, default True If True, several diagnostic plots", "utils.TwoPrint(resfilename) ### step 1: fit model to observation psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True)", "\" + str(psrat_err)) print(\"Bayesian p-value for Likelihood Ratio: \" + str(p_lrt) + \"", "\"\"\" try: keys = summary.keys() except AttributeError: raise Exception(\"Summary must be a dictionary!\")", "if x > fitpars1['merit']])) / float(len(sim_merit)) p_lrt = float(len([x for x in sim_lrt", "out of MCMCs fakeper = mcobs.simulate_periodogram(nsim=nsim) ### empty lists for simulated quantities of", "be 0.05 ### i.e. when only 0.05*nsim simulations are higher than this ###", "fit the periodogram with func and compute the maximum-a-posteriori (MAP) estimate. 
Divide the", "plt.subplot(2, 2, 3) n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') xmin,", "x > optpars['deviance']])) / float(len(sim_deviance)) p_ksp = float(len([x for x in sim_ksp if", "float(len(sim_lrt)) p_srat = float(len([x for x in sim_srat if x > optpars['sobs']])) /", "np.searchsorted(binps.freq, bc) - 1 bpow = binpowers[bind] brms = np.sqrt(bpow * b *", "+ \" +/- \" + str(ps5max_err)) # resfile('Upper limit on maximum signal power", "chi-square distribution with two degrees of freedom. Find the highest power in the", "postpars[x] = summary[x] picklefile = open(namestr + \"_summary_pickle.dat\", \"w\") pickle.dump(summary, picklefile) picklefile.close() file", "str(probs[x][1]) + \"\\n\") elif x == 'p_srat': file.write(\"Bayesian p-value for the sum of", "mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return summary def print_summary(self, summary): \"\"\"", "continue # print('popt' + str(i) + 'd : ' + str(fitpars['popt'])) # print('popt3:", "> fitpars['s5max']])) / float(len(sim_s5max)) p_s11max = float(len([x for x in sim_s11max if x", "limit for highest [unsmoothed] data/model outlier: \" + str(summary['s11max_ul']) + \"\\n\") return def", "= np.searchsorted(binps.freq, bc) - 1 bpow = binpowers[bind] brms = np.sqrt(bpow * b", "the maximum-a-posteriori (MAP) estimate. 
Divide the data by the MAP model; for a", "MAP estimate, divide out the MAP model and find the highest power in", "maximum signal power P_max_ul = ' + str(maxpow_ul)) resfile(\"Bayesian p-value for maximum power", "individual quantities p_maxpow = float(len([x for x in sim_maxpow if x > fitpars1['maxpow']]))", "+ str(fitpars['popt'])) ### Step 4: Fit fake periodograms: for i, x in enumerate(fakeper):", "if bc > (binps.freq[1] - binps.freq[0]): bind = np.searchsorted(binps.freq, bc) - 1 bpow", "+/- \" + str(psrat_err)) if self.plot: plt.subplot(2, 2, 1) n, bins, patches =", "the noise parameter in func1 and func2. In the pre-defined models, this index", "power in the data under the null hypothesis (no QPO). Parameters ---------- func", "ninetyfiveperlim = len(sim_maxpow) - fiveperlim # print('popt4: ' + str(fitpars['popt'])) bindicts = [x[\"bindict\"]", "be saved to disk m: integer, optional, default 1 If the periodogram used", "print(\"Bayesian p-value for the highest [3 bin smoothed] data/model outlier: \" + str(", "(text files and plots) plot: boolean, optional, default True If True, several diagnostic", "for deviance D = \" + str(probs[x][0]) + \" +/- \" + str(probs[x][1]))", "frequency F=\" + str( probs[\"fitpars\"][\"maxfreq\"]) + \"Hz with power P=\" + str(probs[\"fitpars\"][\"maxpow\"])) print(\"Bayesian", "ax.hist(pars[plotkeys[i]][0], 30) ax.vlines(pars[plotkeys[i]][0], 0.0, 0.8 * max(n), lw=4) ax.figtext(pars[plotkeys[i]][0] + 0.01 * pars[plotkeys[i]][0],", "Bayes(object): \"\"\" Bayesian time series analysis This class defines a Bayes object that", "resfile(\"Merit(obs) = \" + str(fitpars1['merit'])) resfile(\"mean(sim_merit) = \" + str(np.mean(sim_merit))) resfile(\"Srat(obs) = \"", "summary = {\"p_lrt\": [p_lrt, plrt_err], \"p_deviance\": [p_deviance, pdeviance_err], \"p_ksp\": [p_ksp, pksp_err], \"p_merit\": [p_merit,", "add smoothing for picking out narrow signals # # # class Bayes(object): \"\"\"", "break ### Step 5: Compute Bayesian posterior 
probabilities of individual quantities p_deviance =", "x in bindicts]) maxpows_all[\"bin\" + str(b)] = bmaxpow bindict['sim_bmaxpow' + str(b)] = bmaxpow", "a list of strings here to set parameter names for plotting noise1, noise2", "data under the null hypothesis (no QPO). Parameters ---------- func : function Parametric", "elements in this list or array must match the number of parameters k", "try: print(\"The autocorrelation times are: \" + str(postpars[\"acor\"])) except KeyError: print(\"Module Acor not", "used to identify this periodogram when saving output (text files and plots) plot:", "[], [], [], [], [], [], [] simno = 0 ### run QPO", "model selection approach using LRTs Parameters ---------- ps : powerspectrum.Powerspectrum A periodogram object", "bintemplate ## now compute rms amplitude at 40, 70, 100 and 300 Hz", "except AttributeError: raise Exception(\"Summary must be a dictionary!\") probs = dict() postpars =", "x == 'p_maxpow': if \"fitpars\" in probs.keys(): print(\"Highest [unsmoothed] data/model outlier at frequency", "mcobs.ci, \"rhat\": mcobs.rhat, \"acor\": mcobs.acor, \"acceptance\": mcobs.acceptance} return results def find_qpo(self, func, ain,", "elif x == 'p_maxpow': file.write(\"Bayesian p-value for the highest [unsmoothed] data/model outlier: \"", "= \" + str(p_deviance) + \" +/- \" + str(pdeviance_err)) resfile(\"Bayesian p-value for", "quantities of interest: sim_lrt, sim_deviance, sim_ksp, sim_maxpow, sim_merit, sim_fpeak, sim_y0, sim_srat = [],", "str(probs[x][1])) elif x == 'p_s5max': if \"fitpars\" in probs.keys(): print(\"Highest [5 bin smoothed]", "1.2, max(25, fitpars['s5max'] * 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s5max'], 0.0, max(n), lw=2,", "binps.freq[0]): bind = np.searchsorted(binps.freq, bc) - 1 bpow = binpowers[bind] brms = np.sqrt(bpow", "self.ps.df) resfile('The upper limit on the power at ' + str(bc) + 'Hz", "be searched for QPOs namestr: string, optional, default \"test\" The string that 
will", "MCMC. If False, use Metropolis-Hastings. parname : list, optional, default None Include a", "/ float(len(sim_lrt)) p_srat = float(len([x for x in sim_srat if x > fitpars1['sobs']]))", "returns an array of model powers. The function should include a parameter setting", "D = \" + str(p_deviance) + \" +/- \" + str(pdeviance_err)) resfile(\"Bayesian p-value", "-1. use_emcee : boolean, optional, default True If True (STRONGLY RECOMMENDED), use the", "as pickle except ImportError: import pickle import copy import numpy as np from", "* 1.2) plt.axis([xmin, xmax, 0.0, max(n)]) plt.vlines(fitpars['s5max'], 0.0, max(n), lw=2, color='navy') plt.title('smoothed (5)", "of elements *must* equal the number of parameters n taken by func2. fitmethod", "obs=False) slrt, soptpars, sqpopars = sim_psfit.find_qpo(func, ain, obs=False, plot=True, plotname=plotstr + '_sim' +", "this can be smaller, but it's a good idea to verify that the", "for the highest [unsmoothed] data/model outlier: \" + str( probs[x][0]) + \" +/-", "else: lpost = posterior.StackPerPosterior(self.ps, func, self.m) ### Step 2: Set up Markov Chain", "optional, default 1.0 A tuning parameter for the MCMC step. Used only in", "in probs.keys(): print(\"Highest [5 bin smoothed] data/model outlier at frequency F=\" + str(", "the parameters taken by func. 
The number of elements in this list or", "searchfreq = [40.0, 70.0, 100.0, 300.0, 500.0, 1000.0] ## for 40 Hz: print(searchfreq)", "fitpars, 'bindict': bindict, 'maxpows_all': maxpows_all, 'mcobs': mcobs, 'p_maxpow': [sim_maxpow, p_maxpow, pmaxpow_err], 'maxpow_ul': maxpow_ul,", "out the largest power in an observation/set of fake periodograms - search for", "= np.sqrt(p_ksp * (1.0 - p_ksp) / float(len(sim_ksp))) pmerit_err = np.sqrt(p_merit * (1.0", "the right distribution Attributes ---------- Examples -------- \"\"\" def __init__(self, ps, namestr='test', plot=True,", "store simulated LRTS and parameters in sim_lrt, sim_optpars, sim_qpopars, sim_deviance, sim_ksp, sim_merit, sim_srat", "= np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp))) plrt_err = np.sqrt(p_lrt * (1.0", "b * self.ps.nphots))) resfile('The upper limit on the rms amplitude at ' +", "float(len(sim_merit)) p_srat = float(len([x for x in sim_srat if x > fitpars['sobs']])) /", "compute autocorrelation times for the parameters\") for i, x in enumerate(postpars[\"rhat\"]): print(\"The $R_hat$", "# print('popt' + str(i) + 'c : ' + str(fitpars['popt'])) sim_pars_all.append(sim_pars) sim_deviance.append(sim_pars['deviance']) sim_ksp.append(sim_pars['ksp'])", "posterior summary of parameters: print(\"-- Posterior Summary of Parameters: \\n\") print(\"parameter \\t mean", "## the file name where the output will be stored resfilename = self.namestr", "len(bmaxpow)) bindict[\"p_maxpow\" + str(b)] = p_bmaxpow bmaxpow_err = np.sqrt(p_bmaxpow * (1.0 - p_bmaxpow)", "for maximum power P_max = \" + str(p_s11max) + \" +/- \" +", "If False, use Metropolis-Hastings. \"\"\" if plotstr == None: plotstr = self.namestr funcname", "posterior.PerPosterior(self.ps, func1) else: lpost = posterior.StackPerPosterior(self.ps, func1, self.m) ### Step 2: Set up", "-1 The index for the noise parameter in func1 and func2. In the", "MAP fit using func1. 
The number of elements *must* equal the number of", "/ float(len(sim_s3max)) p_s5max = float(len([x for x in sim_s5max if x > fitpars['s5max']]))", "= np.sqrt(p_maxpow * (1.0 - p_maxpow) / float(len(sim_ksp))) pdeviance_err = np.sqrt(p_deviance * (1.0", "\" + str(optpars['sobs'])) print(\"mean(sim_srat) = \" + str(np.mean(sim_srat))) ### Step 6: Compute errors", ".\") try: print(\"The autocorrelation times are: \" + str(postpars[\"acor\"])) except KeyError: print(\"Module Acor", "= open(namestr + \"_summary.dat\", \"w\") file.write(\"The ensemble acceptance rate is \" + str(postpars[\"acceptance\"])", ":param namestr: :return: \"\"\" if not namestr: namestr = self.namestr try: keys =", "fake periodogram with the same models as the data, and compute the likelihood", "highest [11 bin smoothed] data/model outlier: \" + str( probs[x][0]) + \" +/-", "np.sqrt(bpow * b * self.ps.df) resfile('The upper limit on the power at '", "mcobs.acceptance} return psfit, fakeper, summary def find_periodicity(self, func, par, fitmethod='bfgs', nchain=10, niter=5000, nsim=1000,", "n, bins, patches = plt.hist(sim_lrt, bins=100, normed=True, color=\"cyan\", histtype='stepfilled') plt.vlines(obslrt, 0.0, 0.8 *", "now turn upper limit into an rms amplitude: ## first compute broadband noise", "this index is *always* -1. \"\"\" resfilename = self.namestr + \"_choosenoisemodel.dat\" resfile =", "func2, par2, noise1=noise1, noise2=noise2, m=self.m) ### get out best fit parameters and associated", "niter=5000, nsim=1000, covfactor=1.0, parname=None, plotstr=None, use_emcee=True): \"\"\" Find QPOs by fitting a QPO", "periodogram ' + str(i) + ' failed! Returning ...') # return psfit, fakeper,", "n-trial corrected! maxpow_ul = sim_maxpow_sort[ninetyfiveperlim] ### Step 6: Compute errors of Bayesian posterior", "I rarely ever use this because it's really computationally expensive. 
Parameters ---------- func", "float(len([x for x in sim_merit if x > optpars['merit']])) / float(len(sim_merit)) p_lrt =", "x == 'p_deviance': print(\"Bayesian p-value for deviance D = \" + str(probs[x][0]) +", "\" + str(probs[x][1])) return def write_summary(self, summary, namestr=None): \"\"\" Write a summary of", "+ ' is rms = ' + str(brms)) bindict['bin' + str(b) + '_ul_%.4fHz'", "periodograms. Fit each fake periodogram with the same models as the data, and", "format='png') plt.close() results = {\"fitpars\": fitpars, 'bindict': bindict, 'maxpows_all': maxpows_all, 'mcobs': mcobs, 'p_maxpow':", "periodograms from samples of the posterior. For each fake periodogram, find the MAP", "with power P=\" + str(probs[\"fitpars\"][\"maxpow\"])) print(\"Bayesian p-value for the highest [unsmoothed] data/model outlier:", "sim_ksp.append(sim_pars1['ksp']) sim_maxpow.append(sim_pars1['maxpow']) sim_merit.append(sim_pars1['merit']) sim_fpeak.append(sim_pars1['maxfreq']) sim_y0.append(sim_pars1['mfit'][sim_pars1['maxind']]) sim_srat.append(sim_pars1['sobs']) except KeyboardInterrupt: break if len(sim_maxpow) == 0:", "= summary.keys() except AttributeError: raise Exception(\"Summary must be a dictionary!\") probs = dict()", "for plotting noise1, noise2 : int, optional, default -1 The index for the", "x in sim_srat if x > fitpars1['sobs']])) / float(len(sim_srat)) resfile('simulated srat: ' +", "for Likelihood Ratio: \" + str(p_lrt) + \" +/- \" + str(plrt_err)) if", "for a binning of ' + str(b) + ' is P = '", "obs=True) fitpars = psfit.mlest(func, ain, obs=True, noise=-1, m=self.m) # print(\"<< --- len(self.ps beginning):", "frequency binpowers = bmaxpow_ul * bintemplate / 2.0 - bintemplate ## now compute", "sim_pars_all, sim_deviance, sim_ksp, sim_fpeak, sim_srat, \\ sim_maxpow, sim_merit, sim_y0, sim_s3max, sim_s5max, sim_s11max =", "func1 and func2 differ in complexity, the less complex should be func1. 
Then", "that takes an array of frequencies and k parameters, and returns an array", "simulated quantities of interest: sim_lrt, sim_deviance, sim_ksp, sim_maxpow, sim_merit, sim_fpeak, sim_y0, sim_srat =", "for the MAP fit using func1. The number of elements *must* equal the", "probs[x][0]) + \" +/- \" + str(probs[x][1])) elif x == 'p_s11max': if \"fitpars\"", "fitmethod=fitmethod, obs=True) fitpars = psfit.mlest(func, ain, obs=True, noise=-1, m=self.m) # print(\"<< --- len(self.ps" ]
[ "print('first_child {}'.format(msg)) def second_child(): print('first_child {}, {}'.format(msg, local_variable)) return ( first_child if flag", "print('parent executed {}'.format(msg)) def first_child(): print('first_child {}'.format(msg)) def second_child(): print('first_child {}, {}'.format(msg, local_variable))", "( first_child if flag else second_child ) global_variable = parent('testing', True) print('-----') global_variable()", "{}, {}'.format(msg, local_variable)) return ( first_child if flag else second_child ) global_variable =", "'15' print('parent executed {}'.format(msg)) def first_child(): print('first_child {}'.format(msg)) def second_child(): print('first_child {}, {}'.format(msg,", "parent(msg, flag: bool): local_variable = '15' print('parent executed {}'.format(msg)) def first_child(): print('first_child {}'.format(msg))", "print('first_child {}, {}'.format(msg, local_variable)) return ( first_child if flag else second_child ) global_variable", "first_child(): print('first_child {}'.format(msg)) def second_child(): print('first_child {}, {}'.format(msg, local_variable)) return ( first_child if", "def first_child(): print('first_child {}'.format(msg)) def second_child(): print('first_child {}, {}'.format(msg, local_variable)) return ( first_child", "executed {}'.format(msg)) def first_child(): print('first_child {}'.format(msg)) def second_child(): print('first_child {}, {}'.format(msg, local_variable)) return", "def parent(msg, flag: bool): local_variable = '15' print('parent executed {}'.format(msg)) def first_child(): print('first_child", "local_variable)) return ( first_child if flag else second_child ) global_variable = parent('testing', True)", "{}'.format(msg)) def second_child(): print('first_child {}, {}'.format(msg, local_variable)) return ( first_child if flag else", "def second_child(): print('first_child {}, {}'.format(msg, local_variable)) return ( first_child if flag else second_child", "return ( first_child if flag else 
second_child ) global_variable = parent('testing', True) print('-----')", "second_child(): print('first_child {}, {}'.format(msg, local_variable)) return ( first_child if flag else second_child )", "bool): local_variable = '15' print('parent executed {}'.format(msg)) def first_child(): print('first_child {}'.format(msg)) def second_child():", "local_variable = '15' print('parent executed {}'.format(msg)) def first_child(): print('first_child {}'.format(msg)) def second_child(): print('first_child", "flag: bool): local_variable = '15' print('parent executed {}'.format(msg)) def first_child(): print('first_child {}'.format(msg)) def", "= '15' print('parent executed {}'.format(msg)) def first_child(): print('first_child {}'.format(msg)) def second_child(): print('first_child {},", "{}'.format(msg)) def first_child(): print('first_child {}'.format(msg)) def second_child(): print('first_child {}, {}'.format(msg, local_variable)) return (", "{}'.format(msg, local_variable)) return ( first_child if flag else second_child ) global_variable = parent('testing'," ]
[ "lstm1_out, lstm1_hidden = self.lstm1(x) lstm2_out, lstm2_hidden = self.lstm2(lstm1_out) lstm3_out, lstm3_hidden = self.lstm3(lstm2_out) x_new", "import torch.nn as nn import torch import torch.nn.functional as F from torch.autograd import", "= nn.Sequential( nn.Linear(2 * hidden_dim + target_size, 150), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150, 1) )", "z_new = torch.stack([lstm3_out_z[i, sentence_length[i]-1] for i in range(len(lstm3_out_z))], dim=0) out = torch.cat((x_new, z_new,", "= nn.LSTM(embedding_dim, hidden_dim, batch_first=True) self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.lstm3 = nn.LSTM(hidden_dim, hidden_dim,", "batch_first=True) self.fc = nn.Sequential( nn.Linear(hidden_dim, 150), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150, target_size) ) def forward(self,", "= embedding_dim self.hidden_dim = hidden_dim self.embedding = nn.Embedding(voacb_size, embedding_dim) self.lstm1 = nn.LSTM(embedding_dim, hidden_dim,", "for i in range(len(lstm3_out))], dim=0) lstm2_out_z, lstm2_hidden_z = self.lstm2(z) lstm3_out_z, lstm3_hidden_z = self.lstm3(lstm2_out_z)", "hidden_dim, batch_first=True) self.fc = nn.Sequential( nn.Linear(hidden_dim, 150), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150, target_size) ) def", "sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() lstm1_out, lstm1_hidden = self.lstm1(x) lstm2_out, lstm2_hidden = self.lstm2(lstm1_out) out =", "voacb_size, embedding_dim=300, hidden_dim=300): super(FeatureExtractor, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim self.embedding =", "Variable class FeatureExtractor(nn.Module): def __init__(self, voacb_size, embedding_dim=300, hidden_dim=300): super(FeatureExtractor, self).__init__() self.embedding_dim = embedding_dim", "lstm1_hidden = self.lstm1(x) lstm2_out, lstm2_hidden = self.lstm2(lstm1_out) lstm3_out, lstm3_hidden = self.lstm3(lstm2_out) x_new =", "u), dim=1) out = self.fc(out) return out 
def info_loss(MI, x, x_length, z, u,", "nn.Linear(150, target_size) ) def forward(self, x, sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() lstm1_out, lstm1_hidden = self.lstm1(x)", "import torch import torch.nn.functional as F from torch.autograd import Variable class FeatureExtractor(nn.Module): def", "self.embedding(sentence) lstm_out, lstm_hidden = self.lstm(x) return lstm_out class Classifier(nn.Module): def __init__(self, target_size=2, hidden_dim=300):", "sentence): x = self.embedding(sentence) lstm_out, lstm_hidden = self.lstm(x) return lstm_out class Classifier(nn.Module): def", "nn.Linear(2 * hidden_dim + target_size, 150), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150, 1) ) def forward(self,", "__init__(self, voacb_size, embedding_dim=300, hidden_dim=300): super(FeatureExtractor, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim self.embedding", "__init__(self, target_size=2, hidden_dim=300): super(Classifier, self).__init__() self.lstm1 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.lstm2 = nn.LSTM(hidden_dim,", "nn.Dropout(), nn.Linear(150, target_size) ) def forward(self, x, sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() lstm1_out, lstm1_hidden =", "embedding_dim) self.lstm1 = nn.LSTM(embedding_dim, hidden_dim, batch_first=True) self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.lstm3 =", "lstm2_hidden = self.lstm2(lstm1_out) lstm3_out, lstm3_hidden = self.lstm3(lstm2_out) x_new = torch.stack([lstm3_out[i, sentence_length[i]-1] for i", "hidden_dim self.embedding = nn.Embedding(voacb_size, embedding_dim) self.lstm1 = nn.LSTM(embedding_dim, hidden_dim, batch_first=True) self.lstm2 = nn.LSTM(hidden_dim,", "= nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.lstm3 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.fc = nn.Sequential( nn.Linear(2", "x_prime_length): Ej = -F.softplus(-MI(x, z, u, x_length)).mean() Em 
= F.softplus(MI(x_prime, z, u, x_prime_length)).mean()", "lstm2_hidden_z = self.lstm2(z) lstm3_out_z, lstm3_hidden_z = self.lstm3(lstm2_out_z) z_new = torch.stack([lstm3_out_z[i, sentence_length[i]-1] for i", "= nn.Embedding(voacb_size, embedding_dim) self.lstm1 = nn.LSTM(embedding_dim, hidden_dim, batch_first=True) self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True)", "= self.lstm3(lstm2_out_z) z_new = torch.stack([lstm3_out_z[i, sentence_length[i]-1] for i in range(len(lstm3_out_z))], dim=0) out =", "import torch.nn.functional as F from torch.autograd import Variable class FeatureExtractor(nn.Module): def __init__(self, voacb_size,", "batch_first=True) def forward(self, sentence): x = self.embedding(sentence) lstm_out, lstm_hidden = self.lstm(x) return lstm_out", "def forward(self, sentence, z, u, sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() self.lstm3.flatten_parameters() x = self.embedding(sentence) lstm1_out,", "out = self.fc(out) return out def info_loss(MI, x, x_length, z, u, x_prime, x_prime_length):", "hidden_dim, batch_first=True) self.fc = nn.Sequential( nn.Linear(2 * hidden_dim + target_size, 150), nn.ReLU(inplace=True), nn.Dropout(),", "lstm1_out, lstm1_hidden = self.lstm1(x) lstm2_out, lstm2_hidden = self.lstm2(lstm1_out) out = torch.stack([lstm2_out[i, sentence_length[i] -", "z, u, x_prime, x_prime_length): Ej = -F.softplus(-MI(x, z, u, x_length)).mean() Em = F.softplus(MI(x_prime,", "+ target_size, 150), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150, 1) ) def forward(self, sentence, z, u,", "150), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150, 1) ) def forward(self, sentence, z, u, sentence_length): self.lstm1.flatten_parameters()", "x, sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() lstm1_out, lstm1_hidden = self.lstm1(x) lstm2_out, lstm2_hidden = self.lstm2(lstm1_out) out", "self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.lstm3 = 
nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.fc = nn.Sequential(", "torch.autograd import Variable class FeatureExtractor(nn.Module): def __init__(self, voacb_size, embedding_dim=300, hidden_dim=300): super(FeatureExtractor, self).__init__() self.embedding_dim", "z, u, sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() self.lstm3.flatten_parameters() x = self.embedding(sentence) lstm1_out, lstm1_hidden = self.lstm1(x)", "FeatureExtractor(nn.Module): def __init__(self, voacb_size, embedding_dim=300, hidden_dim=300): super(FeatureExtractor, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim =", "for i in range(len(lstm2_out))], dim=0) out = self.fc(out) return out class MutlInfo(nn.Module): def", "class FeatureExtractor(nn.Module): def __init__(self, voacb_size, embedding_dim=300, hidden_dim=300): super(FeatureExtractor, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim", "nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150, target_size) ) def forward(self, x, sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() lstm1_out, lstm1_hidden", "self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim self.embedding = nn.Embedding(voacb_size, embedding_dim) self.lstm = nn.LSTM(embedding_dim,", "torch.stack([lstm3_out_z[i, sentence_length[i]-1] for i in range(len(lstm3_out_z))], dim=0) out = torch.cat((x_new, z_new, u), dim=1)", "-F.softplus(-MI(x, z, u, x_length)).mean() Em = F.softplus(MI(x_prime, z, u, x_prime_length)).mean() return Ej -", "= nn.Sequential( nn.Linear(hidden_dim, 150), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150, target_size) ) def forward(self, x, sentence_length):", "hidden_dim + target_size, 150), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150, 1) ) def forward(self, sentence, z,", "self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True) def forward(self, sentence): x = self.embedding(sentence) lstm_out, lstm_hidden", "x, 
x_length, z, u, x_prime, x_prime_length): Ej = -F.softplus(-MI(x, z, u, x_length)).mean() Em", "in range(len(lstm2_out))], dim=0) out = self.fc(out) return out class MutlInfo(nn.Module): def __init__(self, voacb_size,", "= hidden_dim self.embedding = nn.Embedding(voacb_size, embedding_dim) self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True) def forward(self,", "* hidden_dim + target_size, 150), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150, 1) ) def forward(self, sentence,", "self.lstm2(lstm1_out) out = torch.stack([lstm2_out[i, sentence_length[i] - 1] for i in range(len(lstm2_out))], dim=0) out", "voacb_size, target_size=2, embedding_dim=300, hidden_dim=300): super(MutlInfo, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim self.embedding", "nn.Embedding(voacb_size, embedding_dim) self.lstm1 = nn.LSTM(embedding_dim, hidden_dim, batch_first=True) self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.lstm3", "nn.Dropout(), nn.Linear(150, 1) ) def forward(self, sentence, z, u, sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() self.lstm3.flatten_parameters()", "self.fc(out) return out def info_loss(MI, x, x_length, z, u, x_prime, x_prime_length): Ej =", "x = self.embedding(sentence) lstm_out, lstm_hidden = self.lstm(x) return lstm_out class Classifier(nn.Module): def __init__(self,", "def __init__(self, voacb_size, target_size=2, embedding_dim=300, hidden_dim=300): super(MutlInfo, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim =", "self).__init__() self.lstm1 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.fc =", "torch import torch.nn.functional as F from torch.autograd import Variable class FeatureExtractor(nn.Module): def __init__(self,", "nn.LSTM(embedding_dim, hidden_dim, batch_first=True) self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.lstm3 = 
nn.LSTM(hidden_dim, hidden_dim, batch_first=True)", "x_length, z, u, x_prime, x_prime_length): Ej = -F.softplus(-MI(x, z, u, x_length)).mean() Em =", "= self.embedding(sentence) lstm_out, lstm_hidden = self.lstm(x) return lstm_out class Classifier(nn.Module): def __init__(self, target_size=2,", "self.lstm2(lstm1_out) lstm3_out, lstm3_hidden = self.lstm3(lstm2_out) x_new = torch.stack([lstm3_out[i, sentence_length[i]-1] for i in range(len(lstm3_out))],", "range(len(lstm2_out))], dim=0) out = self.fc(out) return out class MutlInfo(nn.Module): def __init__(self, voacb_size, target_size=2,", "embedding_dim self.hidden_dim = hidden_dim self.embedding = nn.Embedding(voacb_size, embedding_dim) self.lstm1 = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)", "1) ) def forward(self, sentence, z, u, sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() self.lstm3.flatten_parameters() x =", "self).__init__() self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim self.embedding = nn.Embedding(voacb_size, embedding_dim) self.lstm =", "batch_first=True) self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.fc = nn.Sequential( nn.Linear(hidden_dim, 150), nn.ReLU(inplace=True), nn.Dropout(),", "hidden_dim=300): super(FeatureExtractor, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim self.embedding = nn.Embedding(voacb_size, embedding_dim)", "self.lstm3(lstm2_out_z) z_new = torch.stack([lstm3_out_z[i, sentence_length[i]-1] for i in range(len(lstm3_out_z))], dim=0) out = torch.cat((x_new,", "out class MutlInfo(nn.Module): def __init__(self, voacb_size, target_size=2, embedding_dim=300, hidden_dim=300): super(MutlInfo, self).__init__() self.embedding_dim =", "torch.stack([lstm3_out[i, sentence_length[i]-1] for i in range(len(lstm3_out))], dim=0) lstm2_out_z, lstm2_hidden_z = self.lstm2(z) lstm3_out_z, lstm3_hidden_z", "self.lstm2.flatten_parameters() self.lstm3.flatten_parameters() x = 
self.embedding(sentence) lstm1_out, lstm1_hidden = self.lstm1(x) lstm2_out, lstm2_hidden = self.lstm2(lstm1_out)", "hidden_dim, batch_first=True) self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.lstm3 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.fc", "lstm_out, lstm_hidden = self.lstm(x) return lstm_out class Classifier(nn.Module): def __init__(self, target_size=2, hidden_dim=300): super(Classifier,", "range(len(lstm3_out))], dim=0) lstm2_out_z, lstm2_hidden_z = self.lstm2(z) lstm3_out_z, lstm3_hidden_z = self.lstm3(lstm2_out_z) z_new = torch.stack([lstm3_out_z[i,", "forward(self, x, sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() lstm1_out, lstm1_hidden = self.lstm1(x) lstm2_out, lstm2_hidden = self.lstm2(lstm1_out)", "def forward(self, sentence): x = self.embedding(sentence) lstm_out, lstm_hidden = self.lstm(x) return lstm_out class", "nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.fc = nn.Sequential( nn.Linear(hidden_dim, 150), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150, target_size) )", "target_size=2, embedding_dim=300, hidden_dim=300): super(MutlInfo, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim self.embedding =", "super(Classifier, self).__init__() self.lstm1 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.fc", "x_prime, x_prime_length): Ej = -F.softplus(-MI(x, z, u, x_length)).mean() Em = F.softplus(MI(x_prime, z, u,", "150), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150, target_size) ) def forward(self, x, sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() lstm1_out,", "embedding_dim=300, hidden_dim=300): super(MutlInfo, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim self.embedding = nn.Embedding(voacb_size,", "hidden_dim=300): super(MutlInfo, self).__init__() self.embedding_dim = embedding_dim 
self.hidden_dim = hidden_dim self.embedding = nn.Embedding(voacb_size, embedding_dim)", "= self.fc(out) return out class MutlInfo(nn.Module): def __init__(self, voacb_size, target_size=2, embedding_dim=300, hidden_dim=300): super(MutlInfo,", "= nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.fc = nn.Sequential( nn.Linear(hidden_dim,", "class Classifier(nn.Module): def __init__(self, target_size=2, hidden_dim=300): super(Classifier, self).__init__() self.lstm1 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True)", "nn.Sequential( nn.Linear(hidden_dim, 150), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150, target_size) ) def forward(self, x, sentence_length): self.lstm1.flatten_parameters()", "= torch.stack([lstm2_out[i, sentence_length[i] - 1] for i in range(len(lstm2_out))], dim=0) out = self.fc(out)", "in range(len(lstm3_out_z))], dim=0) out = torch.cat((x_new, z_new, u), dim=1) out = self.fc(out) return", "lstm2_hidden = self.lstm2(lstm1_out) out = torch.stack([lstm2_out[i, sentence_length[i] - 1] for i in range(len(lstm2_out))],", "nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.fc = nn.Sequential( nn.Linear(2 * hidden_dim + target_size, 150), nn.ReLU(inplace=True),", "= hidden_dim self.embedding = nn.Embedding(voacb_size, embedding_dim) self.lstm1 = nn.LSTM(embedding_dim, hidden_dim, batch_first=True) self.lstm2 =", "= torch.cat((x_new, z_new, u), dim=1) out = self.fc(out) return out def info_loss(MI, x,", "def forward(self, x, sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() lstm1_out, lstm1_hidden = self.lstm1(x) lstm2_out, lstm2_hidden =", "torch.cat((x_new, z_new, u), dim=1) out = self.fc(out) return out def info_loss(MI, x, x_length,", "forward(self, sentence, z, u, sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() self.lstm3.flatten_parameters() x = self.embedding(sentence) lstm1_out, lstm1_hidden", "hidden_dim, 
batch_first=True) def forward(self, sentence): x = self.embedding(sentence) lstm_out, lstm_hidden = self.lstm(x) return", "hidden_dim=300): super(Classifier, self).__init__() self.lstm1 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True)", "- 1] for i in range(len(lstm2_out))], dim=0) out = self.fc(out) return out class", "1] for i in range(len(lstm2_out))], dim=0) out = self.fc(out) return out class MutlInfo(nn.Module):", "self.lstm1(x) lstm2_out, lstm2_hidden = self.lstm2(lstm1_out) lstm3_out, lstm3_hidden = self.lstm3(lstm2_out) x_new = torch.stack([lstm3_out[i, sentence_length[i]-1]", "hidden_dim, batch_first=True) self.lstm3 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.fc = nn.Sequential( nn.Linear(2 * hidden_dim", "= nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.fc = nn.Sequential( nn.Linear(2 * hidden_dim + target_size, 150),", "__init__(self, voacb_size, target_size=2, embedding_dim=300, hidden_dim=300): super(MutlInfo, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim", "self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.fc = nn.Sequential( nn.Linear(hidden_dim, 150), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150,", "dim=0) out = self.fc(out) return out class MutlInfo(nn.Module): def __init__(self, voacb_size, target_size=2, embedding_dim=300,", "= self.lstm1(x) lstm2_out, lstm2_hidden = self.lstm2(lstm1_out) out = torch.stack([lstm2_out[i, sentence_length[i] - 1] for", "= nn.LSTM(embedding_dim, hidden_dim, batch_first=True) def forward(self, sentence): x = self.embedding(sentence) lstm_out, lstm_hidden =", "Ej = -F.softplus(-MI(x, z, u, x_length)).mean() Em = F.softplus(MI(x_prime, z, u, x_prime_length)).mean() return", "lstm2_out, lstm2_hidden = self.lstm2(lstm1_out) lstm3_out, lstm3_hidden = self.lstm3(lstm2_out) x_new = torch.stack([lstm3_out[i, sentence_length[i]-1] for", "self.lstm1.flatten_parameters() 
self.lstm2.flatten_parameters() lstm1_out, lstm1_hidden = self.lstm1(x) lstm2_out, lstm2_hidden = self.lstm2(lstm1_out) out = torch.stack([lstm2_out[i,", "nn.Embedding(voacb_size, embedding_dim) self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True) def forward(self, sentence): x = self.embedding(sentence)", "self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() self.lstm3.flatten_parameters() x = self.embedding(sentence) lstm1_out, lstm1_hidden = self.lstm1(x) lstm2_out, lstm2_hidden =", "self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim self.embedding = nn.Embedding(voacb_size, embedding_dim) self.lstm1 = nn.LSTM(embedding_dim,", "self.embedding(sentence) lstm1_out, lstm1_hidden = self.lstm1(x) lstm2_out, lstm2_hidden = self.lstm2(lstm1_out) lstm3_out, lstm3_hidden = self.lstm3(lstm2_out)", "i in range(len(lstm2_out))], dim=0) out = self.fc(out) return out class MutlInfo(nn.Module): def __init__(self,", "lstm_hidden = self.lstm(x) return lstm_out class Classifier(nn.Module): def __init__(self, target_size=2, hidden_dim=300): super(Classifier, self).__init__()", "self.fc = nn.Sequential( nn.Linear(2 * hidden_dim + target_size, 150), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150, 1)", "z_new, u), dim=1) out = self.fc(out) return out def info_loss(MI, x, x_length, z,", "= self.lstm2(lstm1_out) lstm3_out, lstm3_hidden = self.lstm3(lstm2_out) x_new = torch.stack([lstm3_out[i, sentence_length[i]-1] for i in", "lstm3_out_z, lstm3_hidden_z = self.lstm3(lstm2_out_z) z_new = torch.stack([lstm3_out_z[i, sentence_length[i]-1] for i in range(len(lstm3_out_z))], dim=0)", "self.embedding = nn.Embedding(voacb_size, embedding_dim) self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True) def forward(self, sentence): x", "return lstm_out class Classifier(nn.Module): def __init__(self, target_size=2, hidden_dim=300): super(Classifier, self).__init__() self.lstm1 = nn.LSTM(hidden_dim,", ") def forward(self, x, sentence_length): 
self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() lstm1_out, lstm1_hidden = self.lstm1(x) lstm2_out, lstm2_hidden", "in range(len(lstm3_out))], dim=0) lstm2_out_z, lstm2_hidden_z = self.lstm2(z) lstm3_out_z, lstm3_hidden_z = self.lstm3(lstm2_out_z) z_new =", "out = self.fc(out) return out class MutlInfo(nn.Module): def __init__(self, voacb_size, target_size=2, embedding_dim=300, hidden_dim=300):", "import Variable class FeatureExtractor(nn.Module): def __init__(self, voacb_size, embedding_dim=300, hidden_dim=300): super(FeatureExtractor, self).__init__() self.embedding_dim =", "self.lstm1(x) lstm2_out, lstm2_hidden = self.lstm2(lstm1_out) out = torch.stack([lstm2_out[i, sentence_length[i] - 1] for i", "sentence_length[i]-1] for i in range(len(lstm3_out))], dim=0) lstm2_out_z, lstm2_hidden_z = self.lstm2(z) lstm3_out_z, lstm3_hidden_z =", "as F from torch.autograd import Variable class FeatureExtractor(nn.Module): def __init__(self, voacb_size, embedding_dim=300, hidden_dim=300):", "self.lstm(x) return lstm_out class Classifier(nn.Module): def __init__(self, target_size=2, hidden_dim=300): super(Classifier, self).__init__() self.lstm1 =", "u, sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() self.lstm3.flatten_parameters() x = self.embedding(sentence) lstm1_out, lstm1_hidden = self.lstm1(x) lstm2_out,", "nn.Linear(hidden_dim, 150), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150, target_size) ) def forward(self, x, sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters()", "lstm3_out, lstm3_hidden = self.lstm3(lstm2_out) x_new = torch.stack([lstm3_out[i, sentence_length[i]-1] for i in range(len(lstm3_out))], dim=0)", "lstm2_out_z, lstm2_hidden_z = self.lstm2(z) lstm3_out_z, lstm3_hidden_z = self.lstm3(lstm2_out_z) z_new = torch.stack([lstm3_out_z[i, sentence_length[i]-1] for", "u, x_prime, x_prime_length): Ej = -F.softplus(-MI(x, z, u, x_length)).mean() Em = F.softplus(MI(x_prime, z,", 
"forward(self, sentence): x = self.embedding(sentence) lstm_out, lstm_hidden = self.lstm(x) return lstm_out class Classifier(nn.Module):", "torch.nn.functional as F from torch.autograd import Variable class FeatureExtractor(nn.Module): def __init__(self, voacb_size, embedding_dim=300,", "torch.stack([lstm2_out[i, sentence_length[i] - 1] for i in range(len(lstm2_out))], dim=0) out = self.fc(out) return", "= self.embedding(sentence) lstm1_out, lstm1_hidden = self.lstm1(x) lstm2_out, lstm2_hidden = self.lstm2(lstm1_out) lstm3_out, lstm3_hidden =", "self).__init__() self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim self.embedding = nn.Embedding(voacb_size, embedding_dim) self.lstm1 =", "sentence_length[i]-1] for i in range(len(lstm3_out_z))], dim=0) out = torch.cat((x_new, z_new, u), dim=1) out", "nn import torch import torch.nn.functional as F from torch.autograd import Variable class FeatureExtractor(nn.Module):", "as nn import torch import torch.nn.functional as F from torch.autograd import Variable class", "out = torch.stack([lstm2_out[i, sentence_length[i] - 1] for i in range(len(lstm2_out))], dim=0) out =", "lstm1_hidden = self.lstm1(x) lstm2_out, lstm2_hidden = self.lstm2(lstm1_out) out = torch.stack([lstm2_out[i, sentence_length[i] - 1]", "dim=1) out = self.fc(out) return out def info_loss(MI, x, x_length, z, u, x_prime,", "def info_loss(MI, x, x_length, z, u, x_prime, x_prime_length): Ej = -F.softplus(-MI(x, z, u,", "self.lstm2.flatten_parameters() lstm1_out, lstm1_hidden = self.lstm1(x) lstm2_out, lstm2_hidden = self.lstm2(lstm1_out) out = torch.stack([lstm2_out[i, sentence_length[i]", "nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150, 1) ) def forward(self, sentence, z, u, sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters()", "self.embedding = nn.Embedding(voacb_size, embedding_dim) self.lstm1 = nn.LSTM(embedding_dim, hidden_dim, batch_first=True) self.lstm2 = nn.LSTM(hidden_dim, hidden_dim,", "i in 
range(len(lstm3_out))], dim=0) lstm2_out_z, lstm2_hidden_z = self.lstm2(z) lstm3_out_z, lstm3_hidden_z = self.lstm3(lstm2_out_z) z_new", "embedding_dim) self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True) def forward(self, sentence): x = self.embedding(sentence) lstm_out,", "= self.lstm1(x) lstm2_out, lstm2_hidden = self.lstm2(lstm1_out) lstm3_out, lstm3_hidden = self.lstm3(lstm2_out) x_new = torch.stack([lstm3_out[i,", "Classifier(nn.Module): def __init__(self, target_size=2, hidden_dim=300): super(Classifier, self).__init__() self.lstm1 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.lstm2", "target_size, 150), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150, 1) ) def forward(self, sentence, z, u, sentence_length):", "i in range(len(lstm3_out_z))], dim=0) out = torch.cat((x_new, z_new, u), dim=1) out = self.fc(out)", "from torch.autograd import Variable class FeatureExtractor(nn.Module): def __init__(self, voacb_size, embedding_dim=300, hidden_dim=300): super(FeatureExtractor, self).__init__()", "def __init__(self, target_size=2, hidden_dim=300): super(Classifier, self).__init__() self.lstm1 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.lstm2 =", "= torch.stack([lstm3_out_z[i, sentence_length[i]-1] for i in range(len(lstm3_out_z))], dim=0) out = torch.cat((x_new, z_new, u),", "lstm3_hidden_z = self.lstm3(lstm2_out_z) z_new = torch.stack([lstm3_out_z[i, sentence_length[i]-1] for i in range(len(lstm3_out_z))], dim=0) out", "nn.Sequential( nn.Linear(2 * hidden_dim + target_size, 150), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150, 1) ) def", "self.lstm1 = nn.LSTM(embedding_dim, hidden_dim, batch_first=True) self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.lstm3 = nn.LSTM(hidden_dim,", "sentence, z, u, sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() self.lstm3.flatten_parameters() x = self.embedding(sentence) lstm1_out, lstm1_hidden =", "return out def info_loss(MI, x, 
x_length, z, u, x_prime, x_prime_length): Ej = -F.softplus(-MI(x,", "self.lstm2(z) lstm3_out_z, lstm3_hidden_z = self.lstm3(lstm2_out_z) z_new = torch.stack([lstm3_out_z[i, sentence_length[i]-1] for i in range(len(lstm3_out_z))],", "embedding_dim self.hidden_dim = hidden_dim self.embedding = nn.Embedding(voacb_size, embedding_dim) self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)", "embedding_dim=300, hidden_dim=300): super(FeatureExtractor, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim self.embedding = nn.Embedding(voacb_size,", "hidden_dim, batch_first=True) self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.fc = nn.Sequential( nn.Linear(hidden_dim, 150), nn.ReLU(inplace=True),", "sentence_length[i] - 1] for i in range(len(lstm2_out))], dim=0) out = self.fc(out) return out", "= self.fc(out) return out def info_loss(MI, x, x_length, z, u, x_prime, x_prime_length): Ej", "torch.nn as nn import torch import torch.nn.functional as F from torch.autograd import Variable", "batch_first=True) self.lstm3 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.fc = nn.Sequential( nn.Linear(2 * hidden_dim +", "nn.LSTM(embedding_dim, hidden_dim, batch_first=True) def forward(self, sentence): x = self.embedding(sentence) lstm_out, lstm_hidden = self.lstm(x)", "nn.Linear(150, 1) ) def forward(self, sentence, z, u, sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() self.lstm3.flatten_parameters() x", "range(len(lstm3_out_z))], dim=0) out = torch.cat((x_new, z_new, u), dim=1) out = self.fc(out) return out", "z, u, x_length)).mean() Em = F.softplus(MI(x_prime, z, u, x_prime_length)).mean() return Ej - Em", "batch_first=True) self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.lstm3 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.fc =", "info_loss(MI, x, x_length, z, u, x_prime, x_prime_length): Ej = -F.softplus(-MI(x, z, u, x_length)).mean()", 
"super(MutlInfo, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim self.embedding = nn.Embedding(voacb_size, embedding_dim) self.lstm1", "target_size) ) def forward(self, x, sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() lstm1_out, lstm1_hidden = self.lstm1(x) lstm2_out,", "= -F.softplus(-MI(x, z, u, x_length)).mean() Em = F.softplus(MI(x_prime, z, u, x_prime_length)).mean() return Ej", "lstm3_hidden = self.lstm3(lstm2_out) x_new = torch.stack([lstm3_out[i, sentence_length[i]-1] for i in range(len(lstm3_out))], dim=0) lstm2_out_z,", "target_size=2, hidden_dim=300): super(Classifier, self).__init__() self.lstm1 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.lstm2 = nn.LSTM(hidden_dim, hidden_dim,", "out = torch.cat((x_new, z_new, u), dim=1) out = self.fc(out) return out def info_loss(MI,", "MutlInfo(nn.Module): def __init__(self, voacb_size, target_size=2, embedding_dim=300, hidden_dim=300): super(MutlInfo, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim", "super(FeatureExtractor, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim self.embedding = nn.Embedding(voacb_size, embedding_dim) self.lstm", "class MutlInfo(nn.Module): def __init__(self, voacb_size, target_size=2, embedding_dim=300, hidden_dim=300): super(MutlInfo, self).__init__() self.embedding_dim = embedding_dim", "dim=0) out = torch.cat((x_new, z_new, u), dim=1) out = self.fc(out) return out def", "for i in range(len(lstm3_out_z))], dim=0) out = torch.cat((x_new, z_new, u), dim=1) out =", "self.lstm3(lstm2_out) x_new = torch.stack([lstm3_out[i, sentence_length[i]-1] for i in range(len(lstm3_out))], dim=0) lstm2_out_z, lstm2_hidden_z =", "= self.lstm(x) return lstm_out class Classifier(nn.Module): def __init__(self, target_size=2, hidden_dim=300): super(Classifier, self).__init__() self.lstm1", "x_new = torch.stack([lstm3_out[i, sentence_length[i]-1] for i in 
range(len(lstm3_out))], dim=0) lstm2_out_z, lstm2_hidden_z = self.lstm2(z)", "out def info_loss(MI, x, x_length, z, u, x_prime, x_prime_length): Ej = -F.softplus(-MI(x, z,", "self.lstm3 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.fc = nn.Sequential( nn.Linear(2 * hidden_dim + target_size,", "self.fc = nn.Sequential( nn.Linear(hidden_dim, 150), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150, target_size) ) def forward(self, x,", "batch_first=True) self.fc = nn.Sequential( nn.Linear(2 * hidden_dim + target_size, 150), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150,", "= embedding_dim self.hidden_dim = hidden_dim self.embedding = nn.Embedding(voacb_size, embedding_dim) self.lstm = nn.LSTM(embedding_dim, hidden_dim,", "def __init__(self, voacb_size, embedding_dim=300, hidden_dim=300): super(FeatureExtractor, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim", "self.lstm1 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.fc = nn.Sequential(", "lstm2_out, lstm2_hidden = self.lstm2(lstm1_out) out = torch.stack([lstm2_out[i, sentence_length[i] - 1] for i in", "= nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.fc = nn.Sequential( nn.Linear(hidden_dim, 150), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(150, target_size)", "self.hidden_dim = hidden_dim self.embedding = nn.Embedding(voacb_size, embedding_dim) self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True) def", "dim=0) lstm2_out_z, lstm2_hidden_z = self.lstm2(z) lstm3_out_z, lstm3_hidden_z = self.lstm3(lstm2_out_z) z_new = torch.stack([lstm3_out_z[i, sentence_length[i]-1]", "self.hidden_dim = hidden_dim self.embedding = nn.Embedding(voacb_size, embedding_dim) self.lstm1 = nn.LSTM(embedding_dim, hidden_dim, batch_first=True) self.lstm2", ") def forward(self, sentence, z, u, sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() 
self.lstm3.flatten_parameters() x = self.embedding(sentence)", "return out class MutlInfo(nn.Module): def __init__(self, voacb_size, target_size=2, embedding_dim=300, hidden_dim=300): super(MutlInfo, self).__init__() self.embedding_dim", "nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.fc = nn.Sequential( nn.Linear(hidden_dim, 150),", "lstm_out class Classifier(nn.Module): def __init__(self, target_size=2, hidden_dim=300): super(Classifier, self).__init__() self.lstm1 = nn.LSTM(hidden_dim, hidden_dim,", "hidden_dim self.embedding = nn.Embedding(voacb_size, embedding_dim) self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True) def forward(self, sentence):", "= torch.stack([lstm3_out[i, sentence_length[i]-1] for i in range(len(lstm3_out))], dim=0) lstm2_out_z, lstm2_hidden_z = self.lstm2(z) lstm3_out_z,", "nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.lstm3 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True) self.fc = nn.Sequential( nn.Linear(2 *", "= self.lstm3(lstm2_out) x_new = torch.stack([lstm3_out[i, sentence_length[i]-1] for i in range(len(lstm3_out))], dim=0) lstm2_out_z, lstm2_hidden_z", "x = self.embedding(sentence) lstm1_out, lstm1_hidden = self.lstm1(x) lstm2_out, lstm2_hidden = self.lstm2(lstm1_out) lstm3_out, lstm3_hidden", "sentence_length): self.lstm1.flatten_parameters() self.lstm2.flatten_parameters() self.lstm3.flatten_parameters() x = self.embedding(sentence) lstm1_out, lstm1_hidden = self.lstm1(x) lstm2_out, lstm2_hidden", "= self.lstm2(lstm1_out) out = torch.stack([lstm2_out[i, sentence_length[i] - 1] for i in range(len(lstm2_out))], dim=0)", "self.lstm3.flatten_parameters() x = self.embedding(sentence) lstm1_out, lstm1_hidden = self.lstm1(x) lstm2_out, lstm2_hidden = self.lstm2(lstm1_out) lstm3_out,", "= self.lstm2(z) lstm3_out_z, lstm3_hidden_z = self.lstm3(lstm2_out_z) z_new = torch.stack([lstm3_out_z[i, sentence_length[i]-1] for i in", "self.fc(out) 
return out class MutlInfo(nn.Module): def __init__(self, voacb_size, target_size=2, embedding_dim=300, hidden_dim=300): super(MutlInfo, self).__init__()", "= nn.Embedding(voacb_size, embedding_dim) self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True) def forward(self, sentence): x =", "F from torch.autograd import Variable class FeatureExtractor(nn.Module): def __init__(self, voacb_size, embedding_dim=300, hidden_dim=300): super(FeatureExtractor," ]
[ "\\n\" \"When a Household Member Tests Positive Or Becomes Symptomatic Or \\n\" \"When", "\\n\" \"When Participating in Some Private Events)\" ) fig, ax = style_plot(fig, ax)", "Participating in Some Private Events)\" ) fig, ax = style_plot(fig, ax) fig.tight_layout() fig.savefig(produces)", "fig, ax = plt.subplots(figsize=PLOT_SIZE) sns.lineplot( x=private_demand_shares.index, y=private_demand_shares, ax=ax, ) ax.set_title( \"Private Rapid Test", "task_plot_private_test_demand_shares(depends_on, produces): params = pd.read_pickle(depends_on[\"params\"]) with warnings.catch_warnings(): warnings.filterwarnings( \"ignore\", message=\"indexing past lexsort depth", "Or \\n\" \"When Developing Symptoms but not Receiving a Rapid Test Or \\n\"", "\"plotting.py\": SRC / \"plotting\" / \"plotting.py\", \"testing_shared.py\": SRC / \"testing\" / \"shared.py\", }", "\"private_demand\")] private_demand_shares = get_piecewise_linear_interpolation(params_slice) private_demand_shares = private_demand_shares.loc[PLOT_START_DATE:PLOT_END_DATE] fig, ax = plt.subplots(figsize=PLOT_SIZE) sns.lineplot( x=private_demand_shares.index,", "x=private_demand_shares.index, y=private_demand_shares, ax=ax, ) ax.set_title( \"Private Rapid Test Demand\\n\" \"(Share of Individuals who", "Becomes Symptomatic Or \\n\" \"When Developing Symptoms but not Receiving a Rapid Test", "/ \"rki.pkl\", \"plotting.py\": SRC / \"plotting\" / \"plotting.py\", \"testing_shared.py\": SRC / \"testing\" /", "SRC / \"testing\" / \"shared.py\", } ) @pytask.mark.produces( BLD / \"figures\" / \"data\"", ") params_slice = params.loc[(\"rapid_test_demand\", \"private_demand\")] private_demand_shares = get_piecewise_linear_interpolation(params_slice) private_demand_shares = private_demand_shares.loc[PLOT_START_DATE:PLOT_END_DATE] fig, ax", "/ \"shared.py\", } ) @pytask.mark.produces( BLD / \"figures\" / \"data\" / \"testing\" /", "import style_plot from src.testing.shared import get_piecewise_linear_interpolation 
@pytask.mark.depends_on( { \"params\": BLD / \"params.pkl\", \"rki\":", "\"figures\" / \"data\" / \"testing\" / \"private_test_demand_shares.pdf\" ) def task_plot_private_test_demand_shares(depends_on, produces): params =", "Test Demand\\n\" \"(Share of Individuals who Do a Rapid Test \\n\" \"When a", "lexsort depth may impact performance.\" ) params_slice = params.loc[(\"rapid_test_demand\", \"private_demand\")] private_demand_shares = get_piecewise_linear_interpolation(params_slice)", "\"shared.py\", } ) @pytask.mark.produces( BLD / \"figures\" / \"data\" / \"testing\" / \"private_test_demand_shares.pdf\"", "params.loc[(\"rapid_test_demand\", \"private_demand\")] private_demand_shares = get_piecewise_linear_interpolation(params_slice) private_demand_shares = private_demand_shares.loc[PLOT_START_DATE:PLOT_END_DATE] fig, ax = plt.subplots(figsize=PLOT_SIZE) sns.lineplot(", "a Rapid Test Or \\n\" \"When Participating in Some Private Events)\" ) fig,", "may impact performance.\" ) params_slice = params.loc[(\"rapid_test_demand\", \"private_demand\")] private_demand_shares = get_piecewise_linear_interpolation(params_slice) private_demand_shares =", "/ \"testing\" / \"shared.py\", } ) @pytask.mark.produces( BLD / \"figures\" / \"data\" /", "warnings.catch_warnings(): warnings.filterwarnings( \"ignore\", message=\"indexing past lexsort depth may impact performance.\" ) params_slice =", "private_demand_shares = get_piecewise_linear_interpolation(params_slice) private_demand_shares = private_demand_shares.loc[PLOT_START_DATE:PLOT_END_DATE] fig, ax = plt.subplots(figsize=PLOT_SIZE) sns.lineplot( x=private_demand_shares.index, y=private_demand_shares,", "\"params\": BLD / \"params.pkl\", \"rki\": BLD / \"data\" / \"processed_time_series\" / \"rki.pkl\", \"plotting.py\":", "= plt.subplots(figsize=PLOT_SIZE) sns.lineplot( x=private_demand_shares.index, y=private_demand_shares, ax=ax, ) ax.set_title( \"Private Rapid Test Demand\\n\" \"(Share", "Rapid Test Demand\\n\" \"(Share 
of Individuals who Do a Rapid Test \\n\" \"When", "import PLOT_END_DATE from src.config import PLOT_SIZE from src.config import PLOT_START_DATE from src.config import", "} ) @pytask.mark.produces( BLD / \"figures\" / \"data\" / \"testing\" / \"private_test_demand_shares.pdf\" )", "\"When Participating in Some Private Events)\" ) fig, ax = style_plot(fig, ax) fig.tight_layout()", "/ \"processed_time_series\" / \"rki.pkl\", \"plotting.py\": SRC / \"plotting\" / \"plotting.py\", \"testing_shared.py\": SRC /", "performance.\" ) params_slice = params.loc[(\"rapid_test_demand\", \"private_demand\")] private_demand_shares = get_piecewise_linear_interpolation(params_slice) private_demand_shares = private_demand_shares.loc[PLOT_START_DATE:PLOT_END_DATE] fig,", "{ \"params\": BLD / \"params.pkl\", \"rki\": BLD / \"data\" / \"processed_time_series\" / \"rki.pkl\",", "= params.loc[(\"rapid_test_demand\", \"private_demand\")] private_demand_shares = get_piecewise_linear_interpolation(params_slice) private_demand_shares = private_demand_shares.loc[PLOT_START_DATE:PLOT_END_DATE] fig, ax = plt.subplots(figsize=PLOT_SIZE)", "who Do a Rapid Test \\n\" \"When a Household Member Tests Positive Or", "sns from src.config import BLD from src.config import PLOT_END_DATE from src.config import PLOT_SIZE", "plt import pandas as pd import pytask import seaborn as sns from src.config", "\"When a Household Member Tests Positive Or Becomes Symptomatic Or \\n\" \"When Developing", "y=private_demand_shares, ax=ax, ) ax.set_title( \"Private Rapid Test Demand\\n\" \"(Share of Individuals who Do", "produces): params = pd.read_pickle(depends_on[\"params\"]) with warnings.catch_warnings(): warnings.filterwarnings( \"ignore\", message=\"indexing past lexsort depth may", "SRC from src.plotting.plotting import style_plot from src.testing.shared import get_piecewise_linear_interpolation @pytask.mark.depends_on( { \"params\": BLD", "depth may impact performance.\" ) params_slice = 
params.loc[(\"rapid_test_demand\", \"private_demand\")] private_demand_shares = get_piecewise_linear_interpolation(params_slice) private_demand_shares", "not Receiving a Rapid Test Or \\n\" \"When Participating in Some Private Events)\"", "src.testing.shared import get_piecewise_linear_interpolation @pytask.mark.depends_on( { \"params\": BLD / \"params.pkl\", \"rki\": BLD / \"data\"", "/ \"testing\" / \"private_test_demand_shares.pdf\" ) def task_plot_private_test_demand_shares(depends_on, produces): params = pd.read_pickle(depends_on[\"params\"]) with warnings.catch_warnings():", "import matplotlib.pyplot as plt import pandas as pd import pytask import seaborn as", "BLD / \"params.pkl\", \"rki\": BLD / \"data\" / \"processed_time_series\" / \"rki.pkl\", \"plotting.py\": SRC", "Test Or \\n\" \"When Participating in Some Private Events)\" ) fig, ax =", "pandas as pd import pytask import seaborn as sns from src.config import BLD", "@pytask.mark.depends_on( { \"params\": BLD / \"params.pkl\", \"rki\": BLD / \"data\" / \"processed_time_series\" /", "import seaborn as sns from src.config import BLD from src.config import PLOT_END_DATE from", "Symptoms but not Receiving a Rapid Test Or \\n\" \"When Participating in Some", ") def task_plot_private_test_demand_shares(depends_on, produces): params = pd.read_pickle(depends_on[\"params\"]) with warnings.catch_warnings(): warnings.filterwarnings( \"ignore\", message=\"indexing past", "PLOT_SIZE from src.config import PLOT_START_DATE from src.config import SRC from src.plotting.plotting import style_plot", "Developing Symptoms but not Receiving a Rapid Test Or \\n\" \"When Participating in", "get_piecewise_linear_interpolation @pytask.mark.depends_on( { \"params\": BLD / \"params.pkl\", \"rki\": BLD / \"data\" / \"processed_time_series\"", "from src.config import PLOT_SIZE from src.config import PLOT_START_DATE from src.config import SRC from", "Demand\\n\" \"(Share of Individuals who Do a Rapid Test \\n\" \"When a Household", 
"\"testing_shared.py\": SRC / \"testing\" / \"shared.py\", } ) @pytask.mark.produces( BLD / \"figures\" /", "BLD / \"figures\" / \"data\" / \"testing\" / \"private_test_demand_shares.pdf\" ) def task_plot_private_test_demand_shares(depends_on, produces):", "src.config import PLOT_SIZE from src.config import PLOT_START_DATE from src.config import SRC from src.plotting.plotting", "\"plotting\" / \"plotting.py\", \"testing_shared.py\": SRC / \"testing\" / \"shared.py\", } ) @pytask.mark.produces( BLD", "get_piecewise_linear_interpolation(params_slice) private_demand_shares = private_demand_shares.loc[PLOT_START_DATE:PLOT_END_DATE] fig, ax = plt.subplots(figsize=PLOT_SIZE) sns.lineplot( x=private_demand_shares.index, y=private_demand_shares, ax=ax, )", "def task_plot_private_test_demand_shares(depends_on, produces): params = pd.read_pickle(depends_on[\"params\"]) with warnings.catch_warnings(): warnings.filterwarnings( \"ignore\", message=\"indexing past lexsort", "= private_demand_shares.loc[PLOT_START_DATE:PLOT_END_DATE] fig, ax = plt.subplots(figsize=PLOT_SIZE) sns.lineplot( x=private_demand_shares.index, y=private_demand_shares, ax=ax, ) ax.set_title( \"Private", "\"data\" / \"testing\" / \"private_test_demand_shares.pdf\" ) def task_plot_private_test_demand_shares(depends_on, produces): params = pd.read_pickle(depends_on[\"params\"]) with", "Household Member Tests Positive Or Becomes Symptomatic Or \\n\" \"When Developing Symptoms but", "in Some Private Events)\" ) fig, ax = style_plot(fig, ax) fig.tight_layout() fig.savefig(produces) plt.close()", "import get_piecewise_linear_interpolation @pytask.mark.depends_on( { \"params\": BLD / \"params.pkl\", \"rki\": BLD / \"data\" /", "\"plotting.py\", \"testing_shared.py\": SRC / \"testing\" / \"shared.py\", } ) @pytask.mark.produces( BLD / \"figures\"", "style_plot from src.testing.shared import get_piecewise_linear_interpolation @pytask.mark.depends_on( { \"params\": BLD / \"params.pkl\", \"rki\": BLD", "\"testing\" / 
\"shared.py\", } ) @pytask.mark.produces( BLD / \"figures\" / \"data\" / \"testing\"", "Rapid Test \\n\" \"When a Household Member Tests Positive Or Becomes Symptomatic Or", "impact performance.\" ) params_slice = params.loc[(\"rapid_test_demand\", \"private_demand\")] private_demand_shares = get_piecewise_linear_interpolation(params_slice) private_demand_shares = private_demand_shares.loc[PLOT_START_DATE:PLOT_END_DATE]", "plt.subplots(figsize=PLOT_SIZE) sns.lineplot( x=private_demand_shares.index, y=private_demand_shares, ax=ax, ) ax.set_title( \"Private Rapid Test Demand\\n\" \"(Share of", "private_demand_shares.loc[PLOT_START_DATE:PLOT_END_DATE] fig, ax = plt.subplots(figsize=PLOT_SIZE) sns.lineplot( x=private_demand_shares.index, y=private_demand_shares, ax=ax, ) ax.set_title( \"Private Rapid", "SRC / \"plotting\" / \"plotting.py\", \"testing_shared.py\": SRC / \"testing\" / \"shared.py\", } )", "from src.config import BLD from src.config import PLOT_END_DATE from src.config import PLOT_SIZE from", "from src.config import PLOT_END_DATE from src.config import PLOT_SIZE from src.config import PLOT_START_DATE from", "\"rki.pkl\", \"plotting.py\": SRC / \"plotting\" / \"plotting.py\", \"testing_shared.py\": SRC / \"testing\" / \"shared.py\",", "past lexsort depth may impact performance.\" ) params_slice = params.loc[(\"rapid_test_demand\", \"private_demand\")] private_demand_shares =", "from src.testing.shared import get_piecewise_linear_interpolation @pytask.mark.depends_on( { \"params\": BLD / \"params.pkl\", \"rki\": BLD /", "BLD / \"data\" / \"processed_time_series\" / \"rki.pkl\", \"plotting.py\": SRC / \"plotting\" / \"plotting.py\",", "Positive Or Becomes Symptomatic Or \\n\" \"When Developing Symptoms but not Receiving a", "\"(Share of Individuals who Do a Rapid Test \\n\" \"When a Household Member", "/ \"figures\" / \"data\" / \"testing\" / \"private_test_demand_shares.pdf\" ) def task_plot_private_test_demand_shares(depends_on, produces): params", "pytask 
import seaborn as sns from src.config import BLD from src.config import PLOT_END_DATE", "src.config import SRC from src.plotting.plotting import style_plot from src.testing.shared import get_piecewise_linear_interpolation @pytask.mark.depends_on( {", "a Rapid Test \\n\" \"When a Household Member Tests Positive Or Becomes Symptomatic", "Symptomatic Or \\n\" \"When Developing Symptoms but not Receiving a Rapid Test Or", "from src.config import PLOT_START_DATE from src.config import SRC from src.plotting.plotting import style_plot from", "import warnings import matplotlib.pyplot as plt import pandas as pd import pytask import", "= get_piecewise_linear_interpolation(params_slice) private_demand_shares = private_demand_shares.loc[PLOT_START_DATE:PLOT_END_DATE] fig, ax = plt.subplots(figsize=PLOT_SIZE) sns.lineplot( x=private_demand_shares.index, y=private_demand_shares, ax=ax,", "sns.lineplot( x=private_demand_shares.index, y=private_demand_shares, ax=ax, ) ax.set_title( \"Private Rapid Test Demand\\n\" \"(Share of Individuals", "/ \"private_test_demand_shares.pdf\" ) def task_plot_private_test_demand_shares(depends_on, produces): params = pd.read_pickle(depends_on[\"params\"]) with warnings.catch_warnings(): warnings.filterwarnings( \"ignore\",", "import SRC from src.plotting.plotting import style_plot from src.testing.shared import get_piecewise_linear_interpolation @pytask.mark.depends_on( { \"params\":", "matplotlib.pyplot as plt import pandas as pd import pytask import seaborn as sns", "/ \"params.pkl\", \"rki\": BLD / \"data\" / \"processed_time_series\" / \"rki.pkl\", \"plotting.py\": SRC /", "pd.read_pickle(depends_on[\"params\"]) with warnings.catch_warnings(): warnings.filterwarnings( \"ignore\", message=\"indexing past lexsort depth may impact performance.\" )", "warnings.filterwarnings( \"ignore\", message=\"indexing past lexsort depth may impact performance.\" ) params_slice = params.loc[(\"rapid_test_demand\",", "/ \"plotting.py\", \"testing_shared.py\": 
SRC / \"testing\" / \"shared.py\", } ) @pytask.mark.produces( BLD /", "a Household Member Tests Positive Or Becomes Symptomatic Or \\n\" \"When Developing Symptoms", "\"data\" / \"processed_time_series\" / \"rki.pkl\", \"plotting.py\": SRC / \"plotting\" / \"plotting.py\", \"testing_shared.py\": SRC", "ax = plt.subplots(figsize=PLOT_SIZE) sns.lineplot( x=private_demand_shares.index, y=private_demand_shares, ax=ax, ) ax.set_title( \"Private Rapid Test Demand\\n\"", "of Individuals who Do a Rapid Test \\n\" \"When a Household Member Tests", "Individuals who Do a Rapid Test \\n\" \"When a Household Member Tests Positive", "Tests Positive Or Becomes Symptomatic Or \\n\" \"When Developing Symptoms but not Receiving", "\\n\" \"When Developing Symptoms but not Receiving a Rapid Test Or \\n\" \"When", "Receiving a Rapid Test Or \\n\" \"When Participating in Some Private Events)\" )", "\"Private Rapid Test Demand\\n\" \"(Share of Individuals who Do a Rapid Test \\n\"", "from src.config import SRC from src.plotting.plotting import style_plot from src.testing.shared import get_piecewise_linear_interpolation @pytask.mark.depends_on(", "import pandas as pd import pytask import seaborn as sns from src.config import", "PLOT_END_DATE from src.config import PLOT_SIZE from src.config import PLOT_START_DATE from src.config import SRC", "/ \"data\" / \"processed_time_series\" / \"rki.pkl\", \"plotting.py\": SRC / \"plotting\" / \"plotting.py\", \"testing_shared.py\":", "BLD from src.config import PLOT_END_DATE from src.config import PLOT_SIZE from src.config import PLOT_START_DATE", "as pd import pytask import seaborn as sns from src.config import BLD from", "ax=ax, ) ax.set_title( \"Private Rapid Test Demand\\n\" \"(Share of Individuals who Do a", "src.config import BLD from src.config import PLOT_END_DATE from src.config import PLOT_SIZE from src.config", "Member Tests Positive Or Becomes Symptomatic Or \\n\" \"When Developing Symptoms but not", "/ \"data\" / \"testing\" / 
\"private_test_demand_shares.pdf\" ) def task_plot_private_test_demand_shares(depends_on, produces): params = pd.read_pickle(depends_on[\"params\"])", "\"When Developing Symptoms but not Receiving a Rapid Test Or \\n\" \"When Participating", "\"testing\" / \"private_test_demand_shares.pdf\" ) def task_plot_private_test_demand_shares(depends_on, produces): params = pd.read_pickle(depends_on[\"params\"]) with warnings.catch_warnings(): warnings.filterwarnings(", "Rapid Test Or \\n\" \"When Participating in Some Private Events)\" ) fig, ax", "params_slice = params.loc[(\"rapid_test_demand\", \"private_demand\")] private_demand_shares = get_piecewise_linear_interpolation(params_slice) private_demand_shares = private_demand_shares.loc[PLOT_START_DATE:PLOT_END_DATE] fig, ax =", "Do a Rapid Test \\n\" \"When a Household Member Tests Positive Or Becomes", "/ \"plotting\" / \"plotting.py\", \"testing_shared.py\": SRC / \"testing\" / \"shared.py\", } ) @pytask.mark.produces(", "ax.set_title( \"Private Rapid Test Demand\\n\" \"(Share of Individuals who Do a Rapid Test", "seaborn as sns from src.config import BLD from src.config import PLOT_END_DATE from src.config", "private_demand_shares = private_demand_shares.loc[PLOT_START_DATE:PLOT_END_DATE] fig, ax = plt.subplots(figsize=PLOT_SIZE) sns.lineplot( x=private_demand_shares.index, y=private_demand_shares, ax=ax, ) ax.set_title(", "params = pd.read_pickle(depends_on[\"params\"]) with warnings.catch_warnings(): warnings.filterwarnings( \"ignore\", message=\"indexing past lexsort depth may impact", "Or Becomes Symptomatic Or \\n\" \"When Developing Symptoms but not Receiving a Rapid", "pd import pytask import seaborn as sns from src.config import BLD from src.config", "\"params.pkl\", \"rki\": BLD / \"data\" / \"processed_time_series\" / \"rki.pkl\", \"plotting.py\": SRC / \"plotting\"", "import pytask import seaborn as sns from src.config import BLD from src.config import", "src.plotting.plotting import style_plot from 
src.testing.shared import get_piecewise_linear_interpolation @pytask.mark.depends_on( { \"params\": BLD / \"params.pkl\",", "import BLD from src.config import PLOT_END_DATE from src.config import PLOT_SIZE from src.config import", "warnings import matplotlib.pyplot as plt import pandas as pd import pytask import seaborn", "as plt import pandas as pd import pytask import seaborn as sns from", "but not Receiving a Rapid Test Or \\n\" \"When Participating in Some Private", "\"private_test_demand_shares.pdf\" ) def task_plot_private_test_demand_shares(depends_on, produces): params = pd.read_pickle(depends_on[\"params\"]) with warnings.catch_warnings(): warnings.filterwarnings( \"ignore\", message=\"indexing", "src.config import PLOT_START_DATE from src.config import SRC from src.plotting.plotting import style_plot from src.testing.shared", "Test \\n\" \"When a Household Member Tests Positive Or Becomes Symptomatic Or \\n\"", "from src.plotting.plotting import style_plot from src.testing.shared import get_piecewise_linear_interpolation @pytask.mark.depends_on( { \"params\": BLD /", "@pytask.mark.produces( BLD / \"figures\" / \"data\" / \"testing\" / \"private_test_demand_shares.pdf\" ) def task_plot_private_test_demand_shares(depends_on,", "\"processed_time_series\" / \"rki.pkl\", \"plotting.py\": SRC / \"plotting\" / \"plotting.py\", \"testing_shared.py\": SRC / \"testing\"", "with warnings.catch_warnings(): warnings.filterwarnings( \"ignore\", message=\"indexing past lexsort depth may impact performance.\" ) params_slice", "\"ignore\", message=\"indexing past lexsort depth may impact performance.\" ) params_slice = params.loc[(\"rapid_test_demand\", \"private_demand\")]", "message=\"indexing past lexsort depth may impact performance.\" ) params_slice = params.loc[(\"rapid_test_demand\", \"private_demand\")] private_demand_shares", ") ax.set_title( \"Private Rapid Test Demand\\n\" \"(Share of Individuals who Do a Rapid", "src.config import PLOT_END_DATE from src.config 
import PLOT_SIZE from src.config import PLOT_START_DATE from src.config", "PLOT_START_DATE from src.config import SRC from src.plotting.plotting import style_plot from src.testing.shared import get_piecewise_linear_interpolation", "import PLOT_SIZE from src.config import PLOT_START_DATE from src.config import SRC from src.plotting.plotting import", "import PLOT_START_DATE from src.config import SRC from src.plotting.plotting import style_plot from src.testing.shared import", "Or \\n\" \"When Participating in Some Private Events)\" ) fig, ax = style_plot(fig,", ") @pytask.mark.produces( BLD / \"figures\" / \"data\" / \"testing\" / \"private_test_demand_shares.pdf\" ) def", "= pd.read_pickle(depends_on[\"params\"]) with warnings.catch_warnings(): warnings.filterwarnings( \"ignore\", message=\"indexing past lexsort depth may impact performance.\"", "as sns from src.config import BLD from src.config import PLOT_END_DATE from src.config import", "\"rki\": BLD / \"data\" / \"processed_time_series\" / \"rki.pkl\", \"plotting.py\": SRC / \"plotting\" /" ]
[ "host_id = self.host_ids_by_socket[socket] route_id = self.route_ids_by_host_id[host_id] logging.info(f\"Removing routeID '{route_id}' from hostID '{host_id}'\") del", "inside the JSON message :param msg: a dict containing any additional fields for", "JSON event with optional additional fields via the given websocket connection. :param websocket:", "socket=None, route_id=None): if socket: host_id = self.host_ids_by_socket[socket] logging.info(f\"Assigning routeID '{route_id}' to hostID '{host_id}'\")", ":return: \"\"\" if msg is None: msg = {} msg['event'] = event json_msg", "of the \"event\" field inside the JSON message :param msg: a dict containing", "= {} self.host_ids_by_socket = {} def add(self, host_id, socket): logging.info(f\"Registered hostID '{host_id}'\") self.sockets_by_host_id[host_id],", "\"event\" field inside the JSON message :param msg: a dict containing any additional", "= {} def add(self, host_id, socket): logging.info(f\"Registered hostID '{host_id}'\") self.sockets_by_host_id[host_id], self.host_ids_by_socket[socket] = socket,", "class ClientSet: def __init__(self): self.sockets_by_host_id = {} self.route_ids_by_host_id = {} self.host_ids_by_socket = {}", "socket=None): if socket: host_id = self.host_ids_by_socket[socket] if not socket: socket = self.sockets_by_host_id[host_id] logging.info(f\"Deleting", "self.host_ids_by_socket[socket] if not socket: socket = self.sockets_by_host_id[host_id] logging.info(f\"Deleting hostID '{host_id}'\") self.host_ids_by_socket.pop(socket, None) self.route_ids_by_host_id.pop(host_id,", "route_id=None): if socket: host_id = self.host_ids_by_socket[socket] logging.info(f\"Assigning routeID '{route_id}' to hostID '{host_id}'\") self.route_ids_by_host_id[host_id]", "\"\"\" Send a JSON event with optional additional fields via the given websocket", "await websocket.recv() return json.loads(response) def default_json_encoder(o): if isinstance(o, numpy.int64): return int(o) raise TypeError", 
"self.route_ids_by_host_id[host_id] logging.info(f\"Removing routeID '{route_id}' from hostID '{host_id}'\") del self.route_ids_by_host_id[host_id] def connected_hosts_count(self): return len(self.host_ids_by_socket)", "current_state(self): return self.route_ids_by_host_id async def send_json(websocket, event: str, msg: Dict = None): \"\"\"", "the given websocket connection. :param websocket: the websocket to send the message on", ":param event: the desired value of the \"event\" field inside the JSON message", "import json import logging from typing import Dict import numpy class ClientSet: def", "for the JSON message to contain :return: \"\"\" if msg is None: msg", "recv_json(websocket): response = await websocket.recv() return json.loads(response) def default_json_encoder(o): if isinstance(o, numpy.int64): return", "def __init__(self): self.sockets_by_host_id = {} self.route_ids_by_host_id = {} self.host_ids_by_socket = {} def add(self,", "'{host_id}'\") del self.route_ids_by_host_id[host_id] def connected_hosts_count(self): return len(self.host_ids_by_socket) def current_state(self): return self.route_ids_by_host_id async def", "def send_json(websocket, event: str, msg: Dict = None): \"\"\" Send a JSON event", "async def send_json(websocket, event: str, msg: Dict = None): \"\"\" Send a JSON", "return self.route_ids_by_host_id async def send_json(websocket, event: str, msg: Dict = None): \"\"\" Send", "websocket: the websocket to send the message on :param event: the desired value", "msg is None: msg = {} msg['event'] = event json_msg = json.dumps(msg, default=default_json_encoder)", "hostID '{host_id}'\") del self.route_ids_by_host_id[host_id] def connected_hosts_count(self): return len(self.host_ids_by_socket) def current_state(self): return self.route_ids_by_host_id async", "def clear_route_id(self, host_id=None, socket=None): if socket: host_id = self.host_ids_by_socket[socket] route_id = self.route_ids_by_host_id[host_id] logging.info(f\"Removing", "def 
remove(self, host_id=None, socket=None): if socket: host_id = self.host_ids_by_socket[socket] if not socket: socket", "socket=None): if socket: host_id = self.host_ids_by_socket[socket] route_id = self.route_ids_by_host_id[host_id] logging.info(f\"Removing routeID '{route_id}' from", "return self.host_ids_by_socket.get(socket) def get_route_id(self, host_id=None, socket=None): if socket: host_id = self.host_ids_by_socket[socket] return self.route_ids_by_host_id.get(host_id)", "event with optional additional fields via the given websocket connection. :param websocket: the", "None) self.route_ids_by_host_id.pop(host_id, None) self.sockets_by_host_id.pop(host_id, None) def get_socket(self, host_id): return self.sockets_by_host_id.get(host_id) def get_host_id(self, socket):", "del self.route_ids_by_host_id[host_id] def connected_hosts_count(self): return len(self.host_ids_by_socket) def current_state(self): return self.route_ids_by_host_id async def send_json(websocket,", "get_host_id(self, socket): return self.host_ids_by_socket.get(socket) def get_route_id(self, host_id=None, socket=None): if socket: host_id = self.host_ids_by_socket[socket]", "to hostID '{host_id}'\") self.route_ids_by_host_id[host_id] = route_id def clear_route_id(self, host_id=None, socket=None): if socket: host_id", "a dict containing any additional fields for the JSON message to contain :return:", "{} def add(self, host_id, socket): logging.info(f\"Registered hostID '{host_id}'\") self.sockets_by_host_id[host_id], self.host_ids_by_socket[socket] = socket, host_id", "self.sockets_by_host_id.get(host_id) def get_host_id(self, socket): return self.host_ids_by_socket.get(socket) def get_route_id(self, host_id=None, socket=None): if socket: host_id", "= self.host_ids_by_socket[socket] route_id = self.route_ids_by_host_id[host_id] logging.info(f\"Removing routeID '{route_id}' from hostID '{host_id}'\") del self.route_ids_by_host_id[host_id]", "value of the \"event\" field inside the JSON message 
:param msg: a dict", "JSON message to contain :return: \"\"\" if msg is None: msg = {}", "set_route_id(self, host_id=None, socket=None, route_id=None): if socket: host_id = self.host_ids_by_socket[socket] logging.info(f\"Assigning routeID '{route_id}' to", "with optional additional fields via the given websocket connection. :param websocket: the websocket", "host_id def remove(self, host_id=None, socket=None): if socket: host_id = self.host_ids_by_socket[socket] if not socket:", "socket): return self.host_ids_by_socket.get(socket) def get_route_id(self, host_id=None, socket=None): if socket: host_id = self.host_ids_by_socket[socket] return", "socket: host_id = self.host_ids_by_socket[socket] return self.route_ids_by_host_id.get(host_id) def set_route_id(self, host_id=None, socket=None, route_id=None): if socket:", "{} self.route_ids_by_host_id = {} self.host_ids_by_socket = {} def add(self, host_id, socket): logging.info(f\"Registered hostID", "connected_hosts_count(self): return len(self.host_ids_by_socket) def current_state(self): return self.route_ids_by_host_id async def send_json(websocket, event: str, msg:", "self.route_ids_by_host_id = {} self.host_ids_by_socket = {} def add(self, host_id, socket): logging.info(f\"Registered hostID '{host_id}'\")", "self.host_ids_by_socket[socket] return self.route_ids_by_host_id.get(host_id) def set_route_id(self, host_id=None, socket=None, route_id=None): if socket: host_id = self.host_ids_by_socket[socket]", "self.host_ids_by_socket[socket] route_id = self.route_ids_by_host_id[host_id] logging.info(f\"Removing routeID '{route_id}' from hostID '{host_id}'\") del self.route_ids_by_host_id[host_id] def", "len(self.host_ids_by_socket) def current_state(self): return self.route_ids_by_host_id async def send_json(websocket, event: str, msg: Dict =", "socket: host_id = self.host_ids_by_socket[socket] route_id = self.route_ids_by_host_id[host_id] logging.info(f\"Removing routeID '{route_id}' from hostID '{host_id}'\")", "JSON 
message :param msg: a dict containing any additional fields for the JSON", "'{host_id}'\") self.route_ids_by_host_id[host_id] = route_id def clear_route_id(self, host_id=None, socket=None): if socket: host_id = self.host_ids_by_socket[socket]", "websocket to send the message on :param event: the desired value of the", "any additional fields for the JSON message to contain :return: \"\"\" if msg", "self.route_ids_by_host_id[host_id] def connected_hosts_count(self): return len(self.host_ids_by_socket) def current_state(self): return self.route_ids_by_host_id async def send_json(websocket, event:", "host_id = self.host_ids_by_socket[socket] if not socket: socket = self.sockets_by_host_id[host_id] logging.info(f\"Deleting hostID '{host_id}'\") self.host_ids_by_socket.pop(socket,", "socket): logging.info(f\"Registered hostID '{host_id}'\") self.sockets_by_host_id[host_id], self.host_ids_by_socket[socket] = socket, host_id def remove(self, host_id=None, socket=None):", "= route_id def clear_route_id(self, host_id=None, socket=None): if socket: host_id = self.host_ids_by_socket[socket] route_id =", "msg['event'] = event json_msg = json.dumps(msg, default=default_json_encoder) await websocket.send(json_msg) async def recv_json(websocket): response", "logging.info(f\"Assigning routeID '{route_id}' to hostID '{host_id}'\") self.route_ids_by_host_id[host_id] = route_id def clear_route_id(self, host_id=None, socket=None):", "= {} self.route_ids_by_host_id = {} self.host_ids_by_socket = {} def add(self, host_id, socket): logging.info(f\"Registered", "if socket: host_id = self.host_ids_by_socket[socket] return self.route_ids_by_host_id.get(host_id) def set_route_id(self, host_id=None, socket=None, route_id=None): if", "connection. 
:param websocket: the websocket to send the message on :param event: the", "if socket: host_id = self.host_ids_by_socket[socket] route_id = self.route_ids_by_host_id[host_id] logging.info(f\"Removing routeID '{route_id}' from hostID", "import Dict import numpy class ClientSet: def __init__(self): self.sockets_by_host_id = {} self.route_ids_by_host_id =", "logging.info(f\"Deleting hostID '{host_id}'\") self.host_ids_by_socket.pop(socket, None) self.route_ids_by_host_id.pop(host_id, None) self.sockets_by_host_id.pop(host_id, None) def get_socket(self, host_id): return", "logging.info(f\"Registered hostID '{host_id}'\") self.sockets_by_host_id[host_id], self.host_ids_by_socket[socket] = socket, host_id def remove(self, host_id=None, socket=None): if", "typing import Dict import numpy class ClientSet: def __init__(self): self.sockets_by_host_id = {} self.route_ids_by_host_id", "{} self.host_ids_by_socket = {} def add(self, host_id, socket): logging.info(f\"Registered hostID '{host_id}'\") self.sockets_by_host_id[host_id], self.host_ids_by_socket[socket]", "None: msg = {} msg['event'] = event json_msg = json.dumps(msg, default=default_json_encoder) await websocket.send(json_msg)", "via the given websocket connection. 
:param websocket: the websocket to send the message", "if not socket: socket = self.sockets_by_host_id[host_id] logging.info(f\"Deleting hostID '{host_id}'\") self.host_ids_by_socket.pop(socket, None) self.route_ids_by_host_id.pop(host_id, None)", "routeID '{route_id}' to hostID '{host_id}'\") self.route_ids_by_host_id[host_id] = route_id def clear_route_id(self, host_id=None, socket=None): if", "= self.sockets_by_host_id[host_id] logging.info(f\"Deleting hostID '{host_id}'\") self.host_ids_by_socket.pop(socket, None) self.route_ids_by_host_id.pop(host_id, None) self.sockets_by_host_id.pop(host_id, None) def get_socket(self,", "routeID '{route_id}' from hostID '{host_id}'\") del self.route_ids_by_host_id[host_id] def connected_hosts_count(self): return len(self.host_ids_by_socket) def current_state(self):", "host_id=None, socket=None, route_id=None): if socket: host_id = self.host_ids_by_socket[socket] logging.info(f\"Assigning routeID '{route_id}' to hostID", "on :param event: the desired value of the \"event\" field inside the JSON", "the JSON message :param msg: a dict containing any additional fields for the", "def connected_hosts_count(self): return len(self.host_ids_by_socket) def current_state(self): return self.route_ids_by_host_id async def send_json(websocket, event: str,", "the websocket to send the message on :param event: the desired value of", "send the message on :param event: the desired value of the \"event\" field", "desired value of the \"event\" field inside the JSON message :param msg: a", "if msg is None: msg = {} msg['event'] = event json_msg = json.dumps(msg,", "from typing import Dict import numpy class ClientSet: def __init__(self): self.sockets_by_host_id = {}", "return len(self.host_ids_by_socket) def current_state(self): return self.route_ids_by_host_id async def send_json(websocket, event: str, msg: Dict", "None) self.sockets_by_host_id.pop(host_id, None) def get_socket(self, host_id): return self.sockets_by_host_id.get(host_id) def 
get_host_id(self, socket): return self.host_ids_by_socket.get(socket)", "import numpy class ClientSet: def __init__(self): self.sockets_by_host_id = {} self.route_ids_by_host_id = {} self.host_ids_by_socket", "socket = self.sockets_by_host_id[host_id] logging.info(f\"Deleting hostID '{host_id}'\") self.host_ids_by_socket.pop(socket, None) self.route_ids_by_host_id.pop(host_id, None) self.sockets_by_host_id.pop(host_id, None) def", "return self.sockets_by_host_id.get(host_id) def get_host_id(self, socket): return self.host_ids_by_socket.get(socket) def get_route_id(self, host_id=None, socket=None): if socket:", "fields via the given websocket connection. :param websocket: the websocket to send the", "Send a JSON event with optional additional fields via the given websocket connection.", "given websocket connection. :param websocket: the websocket to send the message on :param", "self.sockets_by_host_id = {} self.route_ids_by_host_id = {} self.host_ids_by_socket = {} def add(self, host_id, socket):", "socket=None): if socket: host_id = self.host_ids_by_socket[socket] return self.route_ids_by_host_id.get(host_id) def set_route_id(self, host_id=None, socket=None, route_id=None):", "'{route_id}' to hostID '{host_id}'\") self.route_ids_by_host_id[host_id] = route_id def clear_route_id(self, host_id=None, socket=None): if socket:", "if socket: host_id = self.host_ids_by_socket[socket] logging.info(f\"Assigning routeID '{route_id}' to hostID '{host_id}'\") self.route_ids_by_host_id[host_id] =", "host_id=None, socket=None): if socket: host_id = self.host_ids_by_socket[socket] if not socket: socket = self.sockets_by_host_id[host_id]", "= {} msg['event'] = event json_msg = json.dumps(msg, default=default_json_encoder) await websocket.send(json_msg) async def", "None) def get_socket(self, host_id): return self.sockets_by_host_id.get(host_id) def get_host_id(self, socket): return self.host_ids_by_socket.get(socket) def get_route_id(self,", "the JSON message to contain :return: 
\"\"\" if msg is None: msg =", "host_id = self.host_ids_by_socket[socket] return self.route_ids_by_host_id.get(host_id) def set_route_id(self, host_id=None, socket=None, route_id=None): if socket: host_id", "'{host_id}'\") self.host_ids_by_socket.pop(socket, None) self.route_ids_by_host_id.pop(host_id, None) self.sockets_by_host_id.pop(host_id, None) def get_socket(self, host_id): return self.sockets_by_host_id.get(host_id) def", "def get_route_id(self, host_id=None, socket=None): if socket: host_id = self.host_ids_by_socket[socket] return self.route_ids_by_host_id.get(host_id) def set_route_id(self,", "msg: a dict containing any additional fields for the JSON message to contain", "message :param msg: a dict containing any additional fields for the JSON message", "= socket, host_id def remove(self, host_id=None, socket=None): if socket: host_id = self.host_ids_by_socket[socket] if", "msg = {} msg['event'] = event json_msg = json.dumps(msg, default=default_json_encoder) await websocket.send(json_msg) async", "str, msg: Dict = None): \"\"\" Send a JSON event with optional additional", "message on :param event: the desired value of the \"event\" field inside the", "host_id): return self.sockets_by_host_id.get(host_id) def get_host_id(self, socket): return self.host_ids_by_socket.get(socket) def get_route_id(self, host_id=None, socket=None): if", "event: str, msg: Dict = None): \"\"\" Send a JSON event with optional", "= json.dumps(msg, default=default_json_encoder) await websocket.send(json_msg) async def recv_json(websocket): response = await websocket.recv() return", "self.host_ids_by_socket.get(socket) def get_route_id(self, host_id=None, socket=None): if socket: host_id = self.host_ids_by_socket[socket] return self.route_ids_by_host_id.get(host_id) def", "default=default_json_encoder) await websocket.send(json_msg) async def recv_json(websocket): response = await websocket.recv() return json.loads(response) def", "remove(self, host_id=None, socket=None): if 
socket: host_id = self.host_ids_by_socket[socket] if not socket: socket =", "add(self, host_id, socket): logging.info(f\"Registered hostID '{host_id}'\") self.sockets_by_host_id[host_id], self.host_ids_by_socket[socket] = socket, host_id def remove(self,", "'{host_id}'\") self.sockets_by_host_id[host_id], self.host_ids_by_socket[socket] = socket, host_id def remove(self, host_id=None, socket=None): if socket: host_id", "= self.route_ids_by_host_id[host_id] logging.info(f\"Removing routeID '{route_id}' from hostID '{host_id}'\") del self.route_ids_by_host_id[host_id] def connected_hosts_count(self): return", "response = await websocket.recv() return json.loads(response) def default_json_encoder(o): if isinstance(o, numpy.int64): return int(o)", "def current_state(self): return self.route_ids_by_host_id async def send_json(websocket, event: str, msg: Dict = None):", "host_id=None, socket=None): if socket: host_id = self.host_ids_by_socket[socket] return self.route_ids_by_host_id.get(host_id) def set_route_id(self, host_id=None, socket=None,", "json.dumps(msg, default=default_json_encoder) await websocket.send(json_msg) async def recv_json(websocket): response = await websocket.recv() return json.loads(response)", "= self.host_ids_by_socket[socket] if not socket: socket = self.sockets_by_host_id[host_id] logging.info(f\"Deleting hostID '{host_id}'\") self.host_ids_by_socket.pop(socket, None)", "= None): \"\"\" Send a JSON event with optional additional fields via the", "logging from typing import Dict import numpy class ClientSet: def __init__(self): self.sockets_by_host_id =", "host_id = self.host_ids_by_socket[socket] logging.info(f\"Assigning routeID '{route_id}' to hostID '{host_id}'\") self.route_ids_by_host_id[host_id] = route_id def", "is None: msg = {} msg['event'] = event json_msg = json.dumps(msg, default=default_json_encoder) await", "socket, host_id def remove(self, host_id=None, socket=None): if socket: host_id = self.host_ids_by_socket[socket] if not", 
"a JSON event with optional additional fields via the given websocket connection. :param", "def get_socket(self, host_id): return self.sockets_by_host_id.get(host_id) def get_host_id(self, socket): return self.host_ids_by_socket.get(socket) def get_route_id(self, host_id=None,", "socket: host_id = self.host_ids_by_socket[socket] logging.info(f\"Assigning routeID '{route_id}' to hostID '{host_id}'\") self.route_ids_by_host_id[host_id] = route_id", "get_socket(self, host_id): return self.sockets_by_host_id.get(host_id) def get_host_id(self, socket): return self.host_ids_by_socket.get(socket) def get_route_id(self, host_id=None, socket=None):", "None): \"\"\" Send a JSON event with optional additional fields via the given", "hostID '{host_id}'\") self.route_ids_by_host_id[host_id] = route_id def clear_route_id(self, host_id=None, socket=None): if socket: host_id =", "host_id, socket): logging.info(f\"Registered hostID '{host_id}'\") self.sockets_by_host_id[host_id], self.host_ids_by_socket[socket] = socket, host_id def remove(self, host_id=None,", "= self.host_ids_by_socket[socket] logging.info(f\"Assigning routeID '{route_id}' to hostID '{host_id}'\") self.route_ids_by_host_id[host_id] = route_id def clear_route_id(self,", "self.route_ids_by_host_id.get(host_id) def set_route_id(self, host_id=None, socket=None, route_id=None): if socket: host_id = self.host_ids_by_socket[socket] logging.info(f\"Assigning routeID", "optional additional fields via the given websocket connection. 
:param websocket: the websocket to", "contain :return: \"\"\" if msg is None: msg = {} msg['event'] = event", "route_id = self.route_ids_by_host_id[host_id] logging.info(f\"Removing routeID '{route_id}' from hostID '{host_id}'\") del self.route_ids_by_host_id[host_id] def connected_hosts_count(self):", "json import logging from typing import Dict import numpy class ClientSet: def __init__(self):", "to contain :return: \"\"\" if msg is None: msg = {} msg['event'] =", "\"\"\" if msg is None: msg = {} msg['event'] = event json_msg =", "the \"event\" field inside the JSON message :param msg: a dict containing any", "async def recv_json(websocket): response = await websocket.recv() return json.loads(response) def default_json_encoder(o): if isinstance(o,", "__init__(self): self.sockets_by_host_id = {} self.route_ids_by_host_id = {} self.host_ids_by_socket = {} def add(self, host_id,", "the desired value of the \"event\" field inside the JSON message :param msg:", "msg: Dict = None): \"\"\" Send a JSON event with optional additional fields", "self.sockets_by_host_id[host_id], self.host_ids_by_socket[socket] = socket, host_id def remove(self, host_id=None, socket=None): if socket: host_id =", "self.host_ids_by_socket[socket] = socket, host_id def remove(self, host_id=None, socket=None): if socket: host_id = self.host_ids_by_socket[socket]", "json_msg = json.dumps(msg, default=default_json_encoder) await websocket.send(json_msg) async def recv_json(websocket): response = await websocket.recv()", "socket: socket = self.sockets_by_host_id[host_id] logging.info(f\"Deleting hostID '{host_id}'\") self.host_ids_by_socket.pop(socket, None) self.route_ids_by_host_id.pop(host_id, None) self.sockets_by_host_id.pop(host_id, None)", "websocket.send(json_msg) async def recv_json(websocket): response = await websocket.recv() return json.loads(response) def default_json_encoder(o): if", "websocket connection. 
:param websocket: the websocket to send the message on :param event:", "await websocket.send(json_msg) async def recv_json(websocket): response = await websocket.recv() return json.loads(response) def default_json_encoder(o):", ":param msg: a dict containing any additional fields for the JSON message to", "message to contain :return: \"\"\" if msg is None: msg = {} msg['event']", "self.host_ids_by_socket.pop(socket, None) self.route_ids_by_host_id.pop(host_id, None) self.sockets_by_host_id.pop(host_id, None) def get_socket(self, host_id): return self.sockets_by_host_id.get(host_id) def get_host_id(self,", ":param websocket: the websocket to send the message on :param event: the desired", "import logging from typing import Dict import numpy class ClientSet: def __init__(self): self.sockets_by_host_id", "self.route_ids_by_host_id[host_id] = route_id def clear_route_id(self, host_id=None, socket=None): if socket: host_id = self.host_ids_by_socket[socket] route_id", "route_id def clear_route_id(self, host_id=None, socket=None): if socket: host_id = self.host_ids_by_socket[socket] route_id = self.route_ids_by_host_id[host_id]", "{} msg['event'] = event json_msg = json.dumps(msg, default=default_json_encoder) await websocket.send(json_msg) async def recv_json(websocket):", "from hostID '{host_id}'\") del self.route_ids_by_host_id[host_id] def connected_hosts_count(self): return len(self.host_ids_by_socket) def current_state(self): return self.route_ids_by_host_id", "self.host_ids_by_socket[socket] logging.info(f\"Assigning routeID '{route_id}' to hostID '{host_id}'\") self.route_ids_by_host_id[host_id] = route_id def clear_route_id(self, host_id=None,", "self.route_ids_by_host_id async def send_json(websocket, event: str, msg: Dict = None): \"\"\" Send a", "self.sockets_by_host_id[host_id] logging.info(f\"Deleting hostID '{host_id}'\") self.host_ids_by_socket.pop(socket, None) self.route_ids_by_host_id.pop(host_id, None) self.sockets_by_host_id.pop(host_id, None) def 
get_socket(self, host_id):", "def set_route_id(self, host_id=None, socket=None, route_id=None): if socket: host_id = self.host_ids_by_socket[socket] logging.info(f\"Assigning routeID '{route_id}'", "if socket: host_id = self.host_ids_by_socket[socket] if not socket: socket = self.sockets_by_host_id[host_id] logging.info(f\"Deleting hostID", "to send the message on :param event: the desired value of the \"event\"", "dict containing any additional fields for the JSON message to contain :return: \"\"\"", "additional fields for the JSON message to contain :return: \"\"\" if msg is", "containing any additional fields for the JSON message to contain :return: \"\"\" if", "def recv_json(websocket): response = await websocket.recv() return json.loads(response) def default_json_encoder(o): if isinstance(o, numpy.int64):", "logging.info(f\"Removing routeID '{route_id}' from hostID '{host_id}'\") del self.route_ids_by_host_id[host_id] def connected_hosts_count(self): return len(self.host_ids_by_socket) def", "send_json(websocket, event: str, msg: Dict = None): \"\"\" Send a JSON event with", "Dict = None): \"\"\" Send a JSON event with optional additional fields via", "Dict import numpy class ClientSet: def __init__(self): self.sockets_by_host_id = {} self.route_ids_by_host_id = {}", "hostID '{host_id}'\") self.host_ids_by_socket.pop(socket, None) self.route_ids_by_host_id.pop(host_id, None) self.sockets_by_host_id.pop(host_id, None) def get_socket(self, host_id): return self.sockets_by_host_id.get(host_id)", "clear_route_id(self, host_id=None, socket=None): if socket: host_id = self.host_ids_by_socket[socket] route_id = self.route_ids_by_host_id[host_id] logging.info(f\"Removing routeID", "'{route_id}' from hostID '{host_id}'\") del self.route_ids_by_host_id[host_id] def connected_hosts_count(self): return len(self.host_ids_by_socket) def current_state(self): return", "self.host_ids_by_socket = {} def add(self, host_id, socket): logging.info(f\"Registered hostID 
'{host_id}'\") self.sockets_by_host_id[host_id], self.host_ids_by_socket[socket] =", "fields for the JSON message to contain :return: \"\"\" if msg is None:", "not socket: socket = self.sockets_by_host_id[host_id] logging.info(f\"Deleting hostID '{host_id}'\") self.host_ids_by_socket.pop(socket, None) self.route_ids_by_host_id.pop(host_id, None) self.sockets_by_host_id.pop(host_id,", "return self.route_ids_by_host_id.get(host_id) def set_route_id(self, host_id=None, socket=None, route_id=None): if socket: host_id = self.host_ids_by_socket[socket] logging.info(f\"Assigning", "numpy class ClientSet: def __init__(self): self.sockets_by_host_id = {} self.route_ids_by_host_id = {} self.host_ids_by_socket =", "= event json_msg = json.dumps(msg, default=default_json_encoder) await websocket.send(json_msg) async def recv_json(websocket): response =", "self.route_ids_by_host_id.pop(host_id, None) self.sockets_by_host_id.pop(host_id, None) def get_socket(self, host_id): return self.sockets_by_host_id.get(host_id) def get_host_id(self, socket): return", "event: the desired value of the \"event\" field inside the JSON message :param", "= await websocket.recv() return json.loads(response) def default_json_encoder(o): if isinstance(o, numpy.int64): return int(o) raise", "def get_host_id(self, socket): return self.host_ids_by_socket.get(socket) def get_route_id(self, host_id=None, socket=None): if socket: host_id =", "additional fields via the given websocket connection. 
:param websocket: the websocket to send", "def add(self, host_id, socket): logging.info(f\"Registered hostID '{host_id}'\") self.sockets_by_host_id[host_id], self.host_ids_by_socket[socket] = socket, host_id def", "event json_msg = json.dumps(msg, default=default_json_encoder) await websocket.send(json_msg) async def recv_json(websocket): response = await", "ClientSet: def __init__(self): self.sockets_by_host_id = {} self.route_ids_by_host_id = {} self.host_ids_by_socket = {} def", "socket: host_id = self.host_ids_by_socket[socket] if not socket: socket = self.sockets_by_host_id[host_id] logging.info(f\"Deleting hostID '{host_id}'\")", "self.sockets_by_host_id.pop(host_id, None) def get_socket(self, host_id): return self.sockets_by_host_id.get(host_id) def get_host_id(self, socket): return self.host_ids_by_socket.get(socket) def", "= self.host_ids_by_socket[socket] return self.route_ids_by_host_id.get(host_id) def set_route_id(self, host_id=None, socket=None, route_id=None): if socket: host_id =", "hostID '{host_id}'\") self.sockets_by_host_id[host_id], self.host_ids_by_socket[socket] = socket, host_id def remove(self, host_id=None, socket=None): if socket:", "the message on :param event: the desired value of the \"event\" field inside", "host_id=None, socket=None): if socket: host_id = self.host_ids_by_socket[socket] route_id = self.route_ids_by_host_id[host_id] logging.info(f\"Removing routeID '{route_id}'", "get_route_id(self, host_id=None, socket=None): if socket: host_id = self.host_ids_by_socket[socket] return self.route_ids_by_host_id.get(host_id) def set_route_id(self, host_id=None,", "field inside the JSON message :param msg: a dict containing any additional fields" ]
[ "from jsonschema import Draft4Validator from jsonschema.exceptions import best_match from datetime import datetime from", "= user_json.get(\"email\") user.password = user_json.get(\"password\") user.save() response = { \"result\": \"ok\", \"user\": user.get_object()", "= datetime.utcnow() ).save() response = { \"result\": \"ok\", \"user\": user_obj(user) } return jsonify(response),", "Setup DATA_PER_PAGE and provides basic validation regarding data states Requirements: None ================================ Name:", "True, created = datetime.utcnow() ).save() response = { \"result\": \"ok\", \"user\": user_obj(user) }", "jsonify, request, abort, render_template import uuid import json from jsonschema import Draft4Validator from", "users.has_next: response[\"links\"].append( { \"href\": \"/users/?page=%s\" % (users.next_num), \"rel\": \"next\" } ) return jsonify(response),", "None ================================ Name: get Parameters: user_id (optional) Role: Data Retrieval Result: Return all", "jsonify(response), 201 def put(self, user_id): user = UserDB.objects.filter(external_id=user_id, live=True).first() if not user: return", "if user: response = { \"result\": \"ok\", \"user\": user_obj(user) } return jsonify(response), 200", "= user_json.get(\"city\"), lang = user_json.get(\"lang\"), name = user_json.get(\"name\"), surname = user_json.get(\"surname\"), username =", "\"previous\" } ) if users.has_next: response[\"links\"].append( { \"href\": \"/users/?page=%s\" % (users.next_num), \"rel\": \"next\"", "user.email = user_json.get(\"email\") user.password = user_json.get(\"password\") user.save() response = { \"result\": \"ok\", \"user\":", "\"self\" } ], \"users\": UserDB.users_obj(users) } if users.has_prev: response[\"links\"].append( { \"href\": \"/users/?page=%s\" %", "return jsonify(response), 200 def delete(self, user_id): user = User.objects.filter(external_id=user_id, live=True).first() if not user:", "\"/users/?page=%s\" % page, \"rel\": \"self\" } ], 
\"users\": UserDB.users_obj(users) } if users.has_prev: response[\"links\"].append(", "\"result\": \"ok\", \"user\": user_obj(user) } return jsonify(response), 201 def put(self, user_id): user =", "user manipulation - Retrieval, Storage, Update and (Logical) Delete Attributes: DATA_PER_PAGE: Pagination quantification", "data states Requirements: None ================================ Name: get Parameters: user_id (optional) Role: Data Retrieval", "= { \"result\": \"ok\", \"user\": user_obj(user) } return jsonify(response), 200 else: return jsonify({}),", "DATA_PER_PAGE and provides basic validation regarding data states Requirements: None ================================ Name: get", "Name: post Parameters: None Role: Creates and stores user data on the database", "!= 'GET' and request.method != 'DELETE') and not request.json: abort(400) def get(self, user_id):", "and (Logical) Delete Attributes: DATA_PER_PAGE: Pagination quantification parameter decorators: decorators applied to each", "datetime.utcnow() ).save() response = { \"result\": \"ok\", \"user\": user_obj(user) } return jsonify(response), 201", "Name: Put Parameters: user_id Role: Data modification Result: Return the data in the", "response = { \"result\": \"ok\", \"user\": user_obj(user) } return jsonify(response), 200 else: return", "\"result\": \"ok\", \"user\": user.get_object() } return jsonify(response), 200 def delete(self, user_id): user =", "def __init__(self): self.DATA_PER_PAGE=10 if (request.method != 'GET' and request.method != 'DELETE') and not", "= user_json.get(\"state\"), city = user_json.get(\"city\"), lang = user_json.get(\"lang\"), name = user_json.get(\"name\"), surname =", "decorators applied to each one of the methods Methods: ================================ Name: __init__ Parameters:", "response[\"links\"].append( { \"href\": \"/users/?page=%s\" % (users.prev_num), \"rel\": \"previous\" } ) if users.has_next: response[\"links\"].append(", "if user_id: user = 
User.objects.filter(external_id=user_id, live=True).first() if user: response = { \"result\": \"ok\",", "data in the modified state Requirements: Authentication ================================ Name: Delete Parameters: user_id Role:", "methods Methods: ================================ Name: __init__ Parameters: None Role: Constructor Result: Setup DATA_PER_PAGE and", "Role: Data modification - logical deletion Result: Confirmation of deletion Requirements: Authentication \"\"\"", "Delete Parameters: user_id Role: Data modification - logical deletion Result: Confirmation of deletion", "database Result: Returns the user stored - or access denied Requirements: Authentication ================================", "user_json.get(\"country\") user.state = user_json.get(\"state\") user.city = user_json.get(\"city\") user.name = user_json.get(\"name\") user.surname = user_json.get(\"surname\")", "users.has_prev: response[\"links\"].append( { \"href\": \"/users/?page=%s\" % (users.prev_num), \"rel\": \"previous\" } ) if users.has_next:", "= users.paginate(page=page, per_page = self.DATA_PER_PAGE) response = { \"result\": \"ok\", \"links\": [ {", "user stored - or access denied Requirements: Authentication ================================ Name: Put Parameters: user_id", "Requirements: None ================================ Name: get Parameters: user_id (optional) Role: Data Retrieval Result: Return", "Name: Delete Parameters: user_id Role: Data modification - logical deletion Result: Confirmation of", "state Requirements: Authentication ================================ Name: Delete Parameters: user_id Role: Data modification - logical", "= user_json.get(\"lang\"), name = user_json.get(\"name\"), surname = user_json.get(\"surname\"), username = user_json.get(\"username\"), email =", "user_json = request.json error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if error: return jsonify({\"error\": error.message}), 400 else:", "regarding data states 
Requirements: None ================================ Name: get Parameters: user_id (optional) Role: Data", "country = user_json.get(\"country\"), state = user_json.get(\"state\"), city = user_json.get(\"city\"), lang = user_json.get(\"lang\"), name", "import UserDB class UserAPI(MethodView): \"\"\" API class for user manipulation - Retrieval, Storage,", "the methods Methods: ================================ Name: __init__ Parameters: None Role: Constructor Result: Setup DATA_PER_PAGE", "modification - logical deletion Result: Confirmation of deletion Requirements: Authentication \"\"\" decorators =", "error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if error: return jsonify({\"error\": error.message}), 400 else: user = User(", "user = User.objects.filter(external_id=user_id, live=True).first() if not user: return jsonify({}), 404 user.live = False", "user_obj(user) } return jsonify(response), 201 def put(self, user_id): user = UserDB.objects.filter(external_id=user_id, live=True).first() if", "User.objects.filter(external_id=user_id, live=True).first() if user: response = { \"result\": \"ok\", \"user\": user_obj(user) } return", "user.save() response = { \"result\": \"ok\", \"user\": user.get_object() } return jsonify(response), 200 def", "} return jsonify(response), 200 def delete(self, user_id): user = User.objects.filter(external_id=user_id, live=True).first() if not", "201 def put(self, user_id): user = UserDB.objects.filter(external_id=user_id, live=True).first() if not user: return jsonify({}),", "def post(self): user_json = request.json error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if error: return jsonify({\"error\": error.message}),", "the data in the modified state Requirements: Authentication ================================ Name: Delete Parameters: user_id", "\"ok\", \"user\": user_obj(user) } return jsonify(response), 201 def put(self, user_id): user = 
UserDB.objects.filter(external_id=user_id,", "Put Parameters: user_id Role: Data modification Result: Return the data in the modified", "of deletion Requirements: Authentication \"\"\" decorators = [app_required] def __init__(self): self.DATA_PER_PAGE=10 if (request.method", "API class for user manipulation - Retrieval, Storage, Update and (Logical) Delete Attributes:", "and request.method != 'DELETE') and not request.json: abort(400) def get(self, user_id): if user_id:", "user.username = user_json.get(\"username\") user.email = user_json.get(\"email\") user.password = user_json.get(\"password\") user.save() response = {", "{ \"result\": \"ok\", \"user\": user.get_object() } return jsonify(response), 200 def delete(self, user_id): user", "Result: Returns the user stored - or access denied Requirements: Authentication ================================ Name:", "to each one of the methods Methods: ================================ Name: __init__ Parameters: None Role:", "jsonschema.exceptions import best_match from datetime import datetime from sys_app.decorators import app_required from user.models.user", "================================ Name: Delete Parameters: user_id Role: Data modification - logical deletion Result: Confirmation", "user_json.get(\"city\") user.name = user_json.get(\"name\") user.surname = user_json.get(\"surname\") user.username = user_json.get(\"username\") user.email = user_json.get(\"email\")", "200 else: return jsonify({}), 404 else: users = UserDB.objects.filter(live=True) page = int(request.args.get('page',1)) users", "from user.models.user import UserDB class UserAPI(MethodView): \"\"\" API class for user manipulation -", "\"user\": user_obj(user) } return jsonify(response), 200 else: return jsonify({}), 404 else: users =", "if not user: return jsonify({}), 404 user.live = False user.save() return jsonify({}), 204", "\"ok\", \"user\": user.get_object() } return jsonify(response), 200 def delete(self, user_id): user = 
User.objects.filter(external_id=user_id,", "400 else: user.country = user_json.get(\"country\") user.state = user_json.get(\"state\") user.city = user_json.get(\"city\") user.name =", "external_id=str(uuid.uuid4()), country = user_json.get(\"country\"), state = user_json.get(\"state\"), city = user_json.get(\"city\"), lang = user_json.get(\"lang\"),", "name = user_json.get(\"name\"), surname = user_json.get(\"surname\"), username = user_json.get(\"username\"), email = user_json.get(\"email\"), password", "} ) if users.has_next: response[\"links\"].append( { \"href\": \"/users/?page=%s\" % (users.next_num), \"rel\": \"next\" }", "= User.objects.filter(external_id=user_id, live=True).first() if not user: return jsonify({}), 404 user.live = False user.save()", "error: return jsonify({\"error\": error.message}), 400 else: user = User( external_id=str(uuid.uuid4()), country = user_json.get(\"country\"),", "response = { \"result\": \"ok\", \"links\": [ { \"href\": \"/users/?page=%s\" % page, \"rel\":", "return jsonify(response), 200 def post(self): user_json = request.json error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if error:", "provides basic validation regarding data states Requirements: None ================================ Name: get Parameters: user_id", "import json from jsonschema import Draft4Validator from jsonschema.exceptions import best_match from datetime import", "request, abort, render_template import uuid import json from jsonschema import Draft4Validator from jsonschema.exceptions", "= User( external_id=str(uuid.uuid4()), country = user_json.get(\"country\"), state = user_json.get(\"state\"), city = user_json.get(\"city\"), lang", "page = int(request.args.get('page',1)) users = users.paginate(page=page, per_page = self.DATA_PER_PAGE) response = { \"result\":", "get Parameters: user_id (optional) Role: Data Retrieval Result: Return all users or one", "live=True).first() if user: response = { \"result\": \"ok\", \"user\": 
user_obj(user) } return jsonify(response),", "Parameters: user_id Role: Data modification Result: Return the data in the modified state", "(users.prev_num), \"rel\": \"previous\" } ) if users.has_next: response[\"links\"].append( { \"href\": \"/users/?page=%s\" % (users.next_num),", ") return jsonify(response), 200 def post(self): user_json = request.json error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if", "(request.method != 'GET' and request.method != 'DELETE') and not request.json: abort(400) def get(self,", "not request.json: abort(400) def get(self, user_id): if user_id: user = User.objects.filter(external_id=user_id, live=True).first() if", "password = user_json.get(\"password\"), bio = user_json.ger(\"bio\"), live = True, created = datetime.utcnow() ).save()", "user data on the database Result: Returns the user stored - or access", "jsonschema import Draft4Validator from jsonschema.exceptions import best_match from datetime import datetime from sys_app.decorators", "return jsonify({}), 404 user_json = request.json error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if error: return jsonify({\"error\":", "validation regarding data states Requirements: None ================================ Name: get Parameters: user_id (optional) Role:", "= { \"result\": \"ok\", \"user\": user.get_object() } return jsonify(response), 200 def delete(self, user_id):", "user_json.get(\"name\"), surname = user_json.get(\"surname\"), username = user_json.get(\"username\"), email = user_json.get(\"email\"), password = user_json.get(\"password\"),", "Data modification - logical deletion Result: Confirmation of deletion Requirements: Authentication \"\"\" decorators", "user_json.get(\"email\") user.password = user_json.get(\"password\") user.save() response = { \"result\": \"ok\", \"user\": user.get_object() }", "import Draft4Validator from jsonschema.exceptions import best_match from datetime import datetime from 
sys_app.decorators import", "request.json error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if error: return jsonify({\"error\": error.message}), 400 else: user =", "data on the database Result: Returns the user stored - or access denied", "\"rel\": \"self\" } ], \"users\": UserDB.users_obj(users) } if users.has_prev: response[\"links\"].append( { \"href\": \"/users/?page=%s\"", "================================ Name: post Parameters: None Role: Creates and stores user data on the", "<reponame>hiperlogic/scsr-api<filename>scsr_api/user/api.py<gh_stars>1-10 from flask.views import MethodView from flask import jsonify, request, abort, render_template import", "================================ Name: __init__ Parameters: None Role: Constructor Result: Setup DATA_PER_PAGE and provides basic", "user_id Role: Data modification - logical deletion Result: Confirmation of deletion Requirements: Authentication", "User( external_id=str(uuid.uuid4()), country = user_json.get(\"country\"), state = user_json.get(\"state\"), city = user_json.get(\"city\"), lang =", "user_obj(user) } return jsonify(response), 200 else: return jsonify({}), 404 else: users = UserDB.objects.filter(live=True)", "[ { \"href\": \"/users/?page=%s\" % page, \"rel\": \"self\" } ], \"users\": UserDB.users_obj(users) }", "not user: return jsonify({}), 404 user_json = request.json error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if error:", "if not user: return jsonify({}), 404 user_json = request.json error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if", "per_page = self.DATA_PER_PAGE) response = { \"result\": \"ok\", \"links\": [ { \"href\": \"/users/?page=%s\"", "stored - or access denied Requirements: Authentication ================================ Name: Put Parameters: user_id Role:", "from sys_app.decorators import app_required from user.models.user import UserDB class UserAPI(MethodView): \"\"\" API class", 
"Parameters: None Role: Constructor Result: Setup DATA_PER_PAGE and provides basic validation regarding data", "= user_json.get(\"username\") user.email = user_json.get(\"email\") user.password = user_json.get(\"password\") user.save() response = { \"result\":", "created = datetime.utcnow() ).save() response = { \"result\": \"ok\", \"user\": user_obj(user) } return", "Data modification Result: Return the data in the modified state Requirements: Authentication ================================", "or access denied Requirements: Authentication ================================ Name: Put Parameters: user_id Role: Data modification", "- or access denied Requirements: Authentication ================================ Name: post Parameters: None Role: Creates", "Parameters: user_id Role: Data modification - logical deletion Result: Confirmation of deletion Requirements:", "return jsonify(response), 201 def put(self, user_id): user = UserDB.objects.filter(external_id=user_id, live=True).first() if not user:", "Authentication ================================ Name: Put Parameters: user_id Role: Data modification Result: Return the data", "user.name = user_json.get(\"name\") user.surname = user_json.get(\"surname\") user.username = user_json.get(\"username\") user.email = user_json.get(\"email\") user.password", "Data Retrieval Result: Return all users or one specific user if requested -", "if users.has_next: response[\"links\"].append( { \"href\": \"/users/?page=%s\" % (users.next_num), \"rel\": \"next\" } ) return", "= UserDB.objects.filter(external_id=user_id, live=True).first() if not user: return jsonify({}), 404 user_json = request.json error", "if error: return jsonify({\"error\": error.message}), 400 else: user = User( external_id=str(uuid.uuid4()), country =", "= UserDB.objects.filter(live=True) page = int(request.args.get('page',1)) users = users.paginate(page=page, per_page = self.DATA_PER_PAGE) response =", "{ \"href\": \"/users/?page=%s\" % page, \"rel\": \"self\" } 
], \"users\": UserDB.users_obj(users) } if", "app_required from user.models.user import UserDB class UserAPI(MethodView): \"\"\" API class for user manipulation", "Methods: ================================ Name: __init__ Parameters: None Role: Constructor Result: Setup DATA_PER_PAGE and provides", "Result: Return the data in the modified state Requirements: Authentication ================================ Name: Delete", "Storage, Update and (Logical) Delete Attributes: DATA_PER_PAGE: Pagination quantification parameter decorators: decorators applied", "user_json.get(\"email\"), password = user_json.get(\"password\"), bio = user_json.ger(\"bio\"), live = True, created = datetime.utcnow()", "- or access denied Requirements: Authentication ================================ Name: Put Parameters: user_id Role: Data", "modified state Requirements: Authentication ================================ Name: Delete Parameters: user_id Role: Data modification -", "user_id Role: Data modification Result: Return the data in the modified state Requirements:", "of the methods Methods: ================================ Name: __init__ Parameters: None Role: Constructor Result: Setup", "user_json.get(\"username\") user.email = user_json.get(\"email\") user.password = user_json.get(\"password\") user.save() response = { \"result\": \"ok\",", "jsonify(response), 200 def delete(self, user_id): user = User.objects.filter(external_id=user_id, live=True).first() if not user: return", "\"href\": \"/users/?page=%s\" % page, \"rel\": \"self\" } ], \"users\": UserDB.users_obj(users) } if users.has_prev:", "quantification parameter decorators: decorators applied to each one of the methods Methods: ================================", "decorators: decorators applied to each one of the methods Methods: ================================ Name: __init__", "bio = user_json.ger(\"bio\"), live = True, created = datetime.utcnow() ).save() response = {", "= 
best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if error: return jsonify({\"error\": error.message}), 400 else: user = User( external_id=str(uuid.uuid4()),", "return jsonify({\"error\": error.message}), 400 else: user = User( external_id=str(uuid.uuid4()), country = user_json.get(\"country\"), state", "= user_json.get(\"name\"), surname = user_json.get(\"surname\"), username = user_json.get(\"username\"), email = user_json.get(\"email\"), password =", "= best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if error: return jsonify({\"error\": error.message}), 400 else: user.country = user_json.get(\"country\") user.state", "% (users.next_num), \"rel\": \"next\" } ) return jsonify(response), 200 def post(self): user_json =", "error.message}), 400 else: user = User( external_id=str(uuid.uuid4()), country = user_json.get(\"country\"), state = user_json.get(\"state\"),", "200 def delete(self, user_id): user = User.objects.filter(external_id=user_id, live=True).first() if not user: return jsonify({}),", "denied Requirements: Authentication ================================ Name: post Parameters: None Role: Creates and stores user", "user_json.get(\"name\") user.surname = user_json.get(\"surname\") user.username = user_json.get(\"username\") user.email = user_json.get(\"email\") user.password = user_json.get(\"password\")", "manipulation - Retrieval, Storage, Update and (Logical) Delete Attributes: DATA_PER_PAGE: Pagination quantification parameter", "user_json.get(\"lang\"), name = user_json.get(\"name\"), surname = user_json.get(\"surname\"), username = user_json.get(\"username\"), email = user_json.get(\"email\"),", "Parameters: None Role: Creates and stores user data on the database Result: Returns", "and not request.json: abort(400) def get(self, user_id): if user_id: user = User.objects.filter(external_id=user_id, live=True).first()", "response = { \"result\": \"ok\", \"user\": user.get_object() } return jsonify(response), 
200 def delete(self,", "request.json: abort(400) def get(self, user_id): if user_id: user = User.objects.filter(external_id=user_id, live=True).first() if user:", "Confirmation of deletion Requirements: Authentication \"\"\" decorators = [app_required] def __init__(self): self.DATA_PER_PAGE=10 if", "request.json error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if error: return jsonify({\"error\": error.message}), 400 else: user.country =", "else: users = UserDB.objects.filter(live=True) page = int(request.args.get('page',1)) users = users.paginate(page=page, per_page = self.DATA_PER_PAGE)", "\"result\": \"ok\", \"links\": [ { \"href\": \"/users/?page=%s\" % page, \"rel\": \"self\" } ],", "jsonify({}), 404 user_json = request.json error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if error: return jsonify({\"error\": error.message}),", "user_json.get(\"city\"), lang = user_json.get(\"lang\"), name = user_json.get(\"name\"), surname = user_json.get(\"surname\"), username = user_json.get(\"username\"),", "= User.objects.filter(external_id=user_id, live=True).first() if user: response = { \"result\": \"ok\", \"user\": user_obj(user) }", "], \"users\": UserDB.users_obj(users) } if users.has_prev: response[\"links\"].append( { \"href\": \"/users/?page=%s\" % (users.prev_num), \"rel\":", "Requirements: Authentication ================================ Name: post Parameters: None Role: Creates and stores user data", "if users.has_prev: response[\"links\"].append( { \"href\": \"/users/?page=%s\" % (users.prev_num), \"rel\": \"previous\" } ) if", "username = user_json.get(\"username\"), email = user_json.get(\"email\"), password = user_json.get(\"password\"), bio = user_json.ger(\"bio\"), live", "Creates and stores user data on the database Result: Returns the user stored", "jsonify(response), 200 def post(self): user_json = request.json error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if 
error: return", "and provides basic validation regarding data states Requirements: None ================================ Name: get Parameters:", "= user_json.get(\"state\") user.city = user_json.get(\"city\") user.name = user_json.get(\"name\") user.surname = user_json.get(\"surname\") user.username =", "- Retrieval, Storage, Update and (Logical) Delete Attributes: DATA_PER_PAGE: Pagination quantification parameter decorators:", "(users.next_num), \"rel\": \"next\" } ) return jsonify(response), 200 def post(self): user_json = request.json", "best_match from datetime import datetime from sys_app.decorators import app_required from user.models.user import UserDB", "states Requirements: None ================================ Name: get Parameters: user_id (optional) Role: Data Retrieval Result:", "Role: Creates and stores user data on the database Result: Returns the user", "[app_required] def __init__(self): self.DATA_PER_PAGE=10 if (request.method != 'GET' and request.method != 'DELETE') and", "= user_json.get(\"name\") user.surname = user_json.get(\"surname\") user.username = user_json.get(\"username\") user.email = user_json.get(\"email\") user.password =", "datetime import datetime from sys_app.decorators import app_required from user.models.user import UserDB class UserAPI(MethodView):", "for user manipulation - Retrieval, Storage, Update and (Logical) Delete Attributes: DATA_PER_PAGE: Pagination", "user = User.objects.filter(external_id=user_id, live=True).first() if user: response = { \"result\": \"ok\", \"user\": user_obj(user)", "json from jsonschema import Draft4Validator from jsonschema.exceptions import best_match from datetime import datetime", "on the database Result: Returns the user stored - or access denied Requirements:", "uuid import json from jsonschema import Draft4Validator from jsonschema.exceptions import best_match from datetime", "\"href\": \"/users/?page=%s\" % (users.prev_num), \"rel\": \"previous\" } ) if users.has_next: 
response[\"links\"].append( { \"href\":", "access denied Requirements: Authentication ================================ Name: post Parameters: None Role: Creates and stores", "import jsonify, request, abort, render_template import uuid import json from jsonschema import Draft4Validator", "= True, created = datetime.utcnow() ).save() response = { \"result\": \"ok\", \"user\": user_obj(user)", "Requirements: Authentication ================================ Name: Put Parameters: user_id Role: Data modification Result: Return the", "def get(self, user_id): if user_id: user = User.objects.filter(external_id=user_id, live=True).first() if user: response =", "the user stored - or access denied Requirements: Authentication ================================ Name: Put Parameters:", "Constructor Result: Setup DATA_PER_PAGE and provides basic validation regarding data states Requirements: None", "one specific user if requested - or access denied Requirements: Authentication ================================ Name:", "= self.DATA_PER_PAGE) response = { \"result\": \"ok\", \"links\": [ { \"href\": \"/users/?page=%s\" %", ") if users.has_next: response[\"links\"].append( { \"href\": \"/users/?page=%s\" % (users.next_num), \"rel\": \"next\" } )", "Role: Data Retrieval Result: Return all users or one specific user if requested", "\"rel\": \"next\" } ) return jsonify(response), 200 def post(self): user_json = request.json error", "parameter decorators: decorators applied to each one of the methods Methods: ================================ Name:", "Name: __init__ Parameters: None Role: Constructor Result: Setup DATA_PER_PAGE and provides basic validation", "or access denied Requirements: Authentication ================================ Name: post Parameters: None Role: Creates and", "= user_json.ger(\"bio\"), live = True, created = datetime.utcnow() ).save() response = { \"result\":", "================================ Name: get Parameters: user_id (optional) Role: Data Retrieval Result: 
Return all users", "jsonify(response), 200 else: return jsonify({}), 404 else: users = UserDB.objects.filter(live=True) page = int(request.args.get('page',1))", "user = User( external_id=str(uuid.uuid4()), country = user_json.get(\"country\"), state = user_json.get(\"state\"), city = user_json.get(\"city\"),", "jsonify({}), 404 else: users = UserDB.objects.filter(live=True) page = int(request.args.get('page',1)) users = users.paginate(page=page, per_page", "!= 'DELETE') and not request.json: abort(400) def get(self, user_id): if user_id: user =", "} return jsonify(response), 200 else: return jsonify({}), 404 else: users = UserDB.objects.filter(live=True) page", "error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if error: return jsonify({\"error\": error.message}), 400 else: user.country = user_json.get(\"country\")", "user_json.get(\"password\") user.save() response = { \"result\": \"ok\", \"user\": user.get_object() } return jsonify(response), 200", "= user_json.get(\"email\"), password = user_json.get(\"password\"), bio = user_json.ger(\"bio\"), live = True, created =", "import MethodView from flask import jsonify, request, abort, render_template import uuid import json", "post Parameters: None Role: Creates and stores user data on the database Result:", "UserDB.objects.filter(external_id=user_id, live=True).first() if not user: return jsonify({}), 404 user_json = request.json error =", "sys_app.decorators import app_required from user.models.user import UserDB class UserAPI(MethodView): \"\"\" API class for", "% (users.prev_num), \"rel\": \"previous\" } ) if users.has_next: response[\"links\"].append( { \"href\": \"/users/?page=%s\" %", "basic validation regarding data states Requirements: None ================================ Name: get Parameters: user_id (optional)", "UserDB.objects.filter(live=True) page = int(request.args.get('page',1)) users = users.paginate(page=page, per_page = self.DATA_PER_PAGE) response = {", 
"Draft4Validator from jsonschema.exceptions import best_match from datetime import datetime from sys_app.decorators import app_required", "user_json.get(\"country\"), state = user_json.get(\"state\"), city = user_json.get(\"city\"), lang = user_json.get(\"lang\"), name = user_json.get(\"name\"),", "= int(request.args.get('page',1)) users = users.paginate(page=page, per_page = self.DATA_PER_PAGE) response = { \"result\": \"ok\",", "access denied Requirements: Authentication ================================ Name: Put Parameters: user_id Role: Data modification Result:", "user_id (optional) Role: Data Retrieval Result: Return all users or one specific user", "MethodView from flask import jsonify, request, abort, render_template import uuid import json from", "from datetime import datetime from sys_app.decorators import app_required from user.models.user import UserDB class", "class UserAPI(MethodView): \"\"\" API class for user manipulation - Retrieval, Storage, Update and", "= user_json.get(\"city\") user.name = user_json.get(\"name\") user.surname = user_json.get(\"surname\") user.username = user_json.get(\"username\") user.email =", "\"next\" } ) return jsonify(response), 200 def post(self): user_json = request.json error =", "response[\"links\"].append( { \"href\": \"/users/?page=%s\" % (users.next_num), \"rel\": \"next\" } ) return jsonify(response), 200", "jsonify({\"error\": error.message}), 400 else: user = User( external_id=str(uuid.uuid4()), country = user_json.get(\"country\"), state =", "put(self, user_id): user = UserDB.objects.filter(external_id=user_id, live=True).first() if not user: return jsonify({}), 404 user_json", "live=True).first() if not user: return jsonify({}), 404 user_json = request.json error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json))", "user_id: user = User.objects.filter(external_id=user_id, live=True).first() if user: response = { \"result\": \"ok\", \"user\":", "Retrieval, Storage, Update and (Logical) 
Delete Attributes: DATA_PER_PAGE: Pagination quantification parameter decorators: decorators", "400 else: user = User( external_id=str(uuid.uuid4()), country = user_json.get(\"country\"), state = user_json.get(\"state\"), city", "def delete(self, user_id): user = User.objects.filter(external_id=user_id, live=True).first() if not user: return jsonify({}), 404", "from flask import jsonify, request, abort, render_template import uuid import json from jsonschema", "user_json.get(\"state\") user.city = user_json.get(\"city\") user.name = user_json.get(\"name\") user.surname = user_json.get(\"surname\") user.username = user_json.get(\"username\")", "- logical deletion Result: Confirmation of deletion Requirements: Authentication \"\"\" decorators = [app_required]", "Return the data in the modified state Requirements: Authentication ================================ Name: Delete Parameters:", "live = True, created = datetime.utcnow() ).save() response = { \"result\": \"ok\", \"user\":", "Attributes: DATA_PER_PAGE: Pagination quantification parameter decorators: decorators applied to each one of the", "from jsonschema.exceptions import best_match from datetime import datetime from sys_app.decorators import app_required from", "\"user\": user_obj(user) } return jsonify(response), 201 def put(self, user_id): user = UserDB.objects.filter(external_id=user_id, live=True).first()", "\"/users/?page=%s\" % (users.next_num), \"rel\": \"next\" } ) return jsonify(response), 200 def post(self): user_json", "Return all users or one specific user if requested - or access denied", "in the modified state Requirements: Authentication ================================ Name: Delete Parameters: user_id Role: Data", "404 user_json = request.json error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if error: return jsonify({\"error\": error.message}), 400", "\"rel\": \"previous\" } ) if users.has_next: response[\"links\"].append( { \"href\": \"/users/?page=%s\" % 
(users.next_num), \"rel\":", "{ \"href\": \"/users/?page=%s\" % (users.next_num), \"rel\": \"next\" } ) return jsonify(response), 200 def", "email = user_json.get(\"email\"), password = user_json.get(\"password\"), bio = user_json.ger(\"bio\"), live = True, created", "= [app_required] def __init__(self): self.DATA_PER_PAGE=10 if (request.method != 'GET' and request.method != 'DELETE')", "\"\"\" API class for user manipulation - Retrieval, Storage, Update and (Logical) Delete", "= user_json.get(\"password\"), bio = user_json.ger(\"bio\"), live = True, created = datetime.utcnow() ).save() response", "200 def post(self): user_json = request.json error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if error: return jsonify({\"error\":", "404 else: users = UserDB.objects.filter(live=True) page = int(request.args.get('page',1)) users = users.paginate(page=page, per_page =", "Retrieval Result: Return all users or one specific user if requested - or", "error.message}), 400 else: user.country = user_json.get(\"country\") user.state = user_json.get(\"state\") user.city = user_json.get(\"city\") user.name", "specific user if requested - or access denied Requirements: Authentication ================================ Name: post", "UserDB.users_obj(users) } if users.has_prev: response[\"links\"].append( { \"href\": \"/users/?page=%s\" % (users.prev_num), \"rel\": \"previous\" }", "= user_json.get(\"username\"), email = user_json.get(\"email\"), password = user_json.get(\"password\"), bio = user_json.ger(\"bio\"), live =", "Pagination quantification parameter decorators: decorators applied to each one of the methods Methods:", "(Logical) Delete Attributes: DATA_PER_PAGE: Pagination quantification parameter decorators: decorators applied to each one", "surname = user_json.get(\"surname\"), username = user_json.get(\"username\"), email = user_json.get(\"email\"), password = user_json.get(\"password\"), bio", "the modified state Requirements: Authentication 
================================ Name: Delete Parameters: user_id Role: Data modification", "DATA_PER_PAGE: Pagination quantification parameter decorators: decorators applied to each one of the methods", "return jsonify(response), 200 else: return jsonify({}), 404 else: users = UserDB.objects.filter(live=True) page =", "================================ Name: Put Parameters: user_id Role: Data modification Result: Return the data in", "user = UserDB.objects.filter(external_id=user_id, live=True).first() if not user: return jsonify({}), 404 user_json = request.json", "Parameters: user_id (optional) Role: Data Retrieval Result: Return all users or one specific", "Name: get Parameters: user_id (optional) Role: Data Retrieval Result: Return all users or", "abort(400) def get(self, user_id): if user_id: user = User.objects.filter(external_id=user_id, live=True).first() if user: response", "deletion Result: Confirmation of deletion Requirements: Authentication \"\"\" decorators = [app_required] def __init__(self):", "users or one specific user if requested - or access denied Requirements: Authentication", "user_id): user = User.objects.filter(external_id=user_id, live=True).first() if not user: return jsonify({}), 404 user.live =", "else: user = User( external_id=str(uuid.uuid4()), country = user_json.get(\"country\"), state = user_json.get(\"state\"), city =", "user.get_object() } return jsonify(response), 200 def delete(self, user_id): user = User.objects.filter(external_id=user_id, live=True).first() if", "{ \"href\": \"/users/?page=%s\" % (users.prev_num), \"rel\": \"previous\" } ) if users.has_next: response[\"links\"].append( {", "Role: Data modification Result: Return the data in the modified state Requirements: Authentication", "def put(self, user_id): user = UserDB.objects.filter(external_id=user_id, live=True).first() if not user: return jsonify({}), 404", "users = users.paginate(page=page, per_page = self.DATA_PER_PAGE) response = { \"result\": \"ok\", 
\"links\": [", "import uuid import json from jsonschema import Draft4Validator from jsonschema.exceptions import best_match from", "} ) return jsonify(response), 200 def post(self): user_json = request.json error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json))", "user.models.user import UserDB class UserAPI(MethodView): \"\"\" API class for user manipulation - Retrieval,", "flask import jsonify, request, abort, render_template import uuid import json from jsonschema import", "stores user data on the database Result: Returns the user stored - or", "\"ok\", \"links\": [ { \"href\": \"/users/?page=%s\" % page, \"rel\": \"self\" } ], \"users\":", "logical deletion Result: Confirmation of deletion Requirements: Authentication \"\"\" decorators = [app_required] def", "best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if error: return jsonify({\"error\": error.message}), 400 else: user.country = user_json.get(\"country\") user.state =", "users.paginate(page=page, per_page = self.DATA_PER_PAGE) response = { \"result\": \"ok\", \"links\": [ { \"href\":", "user_id): if user_id: user = User.objects.filter(external_id=user_id, live=True).first() if user: response = { \"result\":", "Returns the user stored - or access denied Requirements: Authentication ================================ Name: Put", "users = UserDB.objects.filter(live=True) page = int(request.args.get('page',1)) users = users.paginate(page=page, per_page = self.DATA_PER_PAGE) response", "live=True).first() if not user: return jsonify({}), 404 user.live = False user.save() return jsonify({}),", "response = { \"result\": \"ok\", \"user\": user_obj(user) } return jsonify(response), 201 def put(self,", "__init__(self): self.DATA_PER_PAGE=10 if (request.method != 'GET' and request.method != 'DELETE') and not request.json:", "from flask.views import MethodView from flask import jsonify, request, abort, render_template import uuid", "user.state = user_json.get(\"state\") 
user.city = user_json.get(\"city\") user.name = user_json.get(\"name\") user.surname = user_json.get(\"surname\") user.username", "{ \"result\": \"ok\", \"user\": user_obj(user) } return jsonify(response), 200 else: return jsonify({}), 404", "render_template import uuid import json from jsonschema import Draft4Validator from jsonschema.exceptions import best_match", "import best_match from datetime import datetime from sys_app.decorators import app_required from user.models.user import", "user_json.get(\"username\"), email = user_json.get(\"email\"), password = user_json.get(\"password\"), bio = user_json.ger(\"bio\"), live = True,", "__init__ Parameters: None Role: Constructor Result: Setup DATA_PER_PAGE and provides basic validation regarding", "= user_json.get(\"country\") user.state = user_json.get(\"state\") user.city = user_json.get(\"city\") user.name = user_json.get(\"name\") user.surname =", "Delete Attributes: DATA_PER_PAGE: Pagination quantification parameter decorators: decorators applied to each one of", "page, \"rel\": \"self\" } ], \"users\": UserDB.users_obj(users) } if users.has_prev: response[\"links\"].append( { \"href\":", "= user_json.get(\"surname\") user.username = user_json.get(\"username\") user.email = user_json.get(\"email\") user.password = user_json.get(\"password\") user.save() response", "None Role: Constructor Result: Setup DATA_PER_PAGE and provides basic validation regarding data states", "None Role: Creates and stores user data on the database Result: Returns the", "Requirements: Authentication ================================ Name: Delete Parameters: user_id Role: Data modification - logical deletion", "\"\"\" decorators = [app_required] def __init__(self): self.DATA_PER_PAGE=10 if (request.method != 'GET' and request.method", "self.DATA_PER_PAGE) response = { \"result\": \"ok\", \"links\": [ { \"href\": \"/users/?page=%s\" % page,", ").save() response = { \"result\": \"ok\", \"user\": user_obj(user) } return jsonify(response), 
201 def", "UserAPI(MethodView): \"\"\" API class for user manipulation - Retrieval, Storage, Update and (Logical)", "each one of the methods Methods: ================================ Name: __init__ Parameters: None Role: Constructor", "\"users\": UserDB.users_obj(users) } if users.has_prev: response[\"links\"].append( { \"href\": \"/users/?page=%s\" % (users.prev_num), \"rel\": \"previous\"", "deletion Requirements: Authentication \"\"\" decorators = [app_required] def __init__(self): self.DATA_PER_PAGE=10 if (request.method !=", "self.DATA_PER_PAGE=10 if (request.method != 'GET' and request.method != 'DELETE') and not request.json: abort(400)", "modification Result: Return the data in the modified state Requirements: Authentication ================================ Name:", "% page, \"rel\": \"self\" } ], \"users\": UserDB.users_obj(users) } if users.has_prev: response[\"links\"].append( {", "user.surname = user_json.get(\"surname\") user.username = user_json.get(\"username\") user.email = user_json.get(\"email\") user.password = user_json.get(\"password\") user.save()", "} ], \"users\": UserDB.users_obj(users) } if users.has_prev: response[\"links\"].append( { \"href\": \"/users/?page=%s\" % (users.prev_num),", "'GET' and request.method != 'DELETE') and not request.json: abort(400) def get(self, user_id): if", "applied to each one of the methods Methods: ================================ Name: __init__ Parameters: None", "import datetime from sys_app.decorators import app_required from user.models.user import UserDB class UserAPI(MethodView): \"\"\"", "get(self, user_id): if user_id: user = User.objects.filter(external_id=user_id, live=True).first() if user: response = {", "Result: Setup DATA_PER_PAGE and provides basic validation regarding data states Requirements: None ================================", "\"links\": [ { \"href\": \"/users/?page=%s\" % page, \"rel\": \"self\" } ], \"users\": UserDB.users_obj(users)", "= user_json.get(\"country\"), state = 
user_json.get(\"state\"), city = user_json.get(\"city\"), lang = user_json.get(\"lang\"), name =", "= user_json.get(\"surname\"), username = user_json.get(\"username\"), email = user_json.get(\"email\"), password = user_json.get(\"password\"), bio =", "Authentication \"\"\" decorators = [app_required] def __init__(self): self.DATA_PER_PAGE=10 if (request.method != 'GET' and", "error: return jsonify({\"error\": error.message}), 400 else: user.country = user_json.get(\"country\") user.state = user_json.get(\"state\") user.city", "one of the methods Methods: ================================ Name: __init__ Parameters: None Role: Constructor Result:", "UserDB class UserAPI(MethodView): \"\"\" API class for user manipulation - Retrieval, Storage, Update", "requested - or access denied Requirements: Authentication ================================ Name: post Parameters: None Role:", "and stores user data on the database Result: Returns the user stored -", "(optional) Role: Data Retrieval Result: Return all users or one specific user if", "else: return jsonify({}), 404 else: users = UserDB.objects.filter(live=True) page = int(request.args.get('page',1)) users =", "return jsonify({\"error\": error.message}), 400 else: user.country = user_json.get(\"country\") user.state = user_json.get(\"state\") user.city =", "user_json.get(\"surname\"), username = user_json.get(\"username\"), email = user_json.get(\"email\"), password = user_json.get(\"password\"), bio = user_json.ger(\"bio\"),", "user_id): user = UserDB.objects.filter(external_id=user_id, live=True).first() if not user: return jsonify({}), 404 user_json =", "Requirements: Authentication \"\"\" decorators = [app_required] def __init__(self): self.DATA_PER_PAGE=10 if (request.method != 'GET'", "post(self): user_json = request.json error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if error: return jsonify({\"error\": error.message}), 400", "user_json.get(\"surname\") user.username = 
user_json.get(\"username\") user.email = user_json.get(\"email\") user.password = user_json.get(\"password\") user.save() response =", "} if users.has_prev: response[\"links\"].append( { \"href\": \"/users/?page=%s\" % (users.prev_num), \"rel\": \"previous\" } )", "user: return jsonify({}), 404 user_json = request.json error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if error: return", "= { \"result\": \"ok\", \"user\": user_obj(user) } return jsonify(response), 201 def put(self, user_id):", "lang = user_json.get(\"lang\"), name = user_json.get(\"name\"), surname = user_json.get(\"surname\"), username = user_json.get(\"username\"), email", "if requested - or access denied Requirements: Authentication ================================ Name: post Parameters: None", "datetime from sys_app.decorators import app_required from user.models.user import UserDB class UserAPI(MethodView): \"\"\" API", "Result: Confirmation of deletion Requirements: Authentication \"\"\" decorators = [app_required] def __init__(self): self.DATA_PER_PAGE=10", "or one specific user if requested - or access denied Requirements: Authentication ================================", "Authentication ================================ Name: post Parameters: None Role: Creates and stores user data on", "user_json.get(\"state\"), city = user_json.get(\"city\"), lang = user_json.get(\"lang\"), name = user_json.get(\"name\"), surname = user_json.get(\"surname\"),", "if error: return jsonify({\"error\": error.message}), 400 else: user.country = user_json.get(\"country\") user.state = user_json.get(\"state\")", "user.password = user_json.get(\"password\") user.save() response = { \"result\": \"ok\", \"user\": user.get_object() } return", "user.country = user_json.get(\"country\") user.state = user_json.get(\"state\") user.city = user_json.get(\"city\") user.name = user_json.get(\"name\") user.surname", "user.city = user_json.get(\"city\") user.name = user_json.get(\"name\") 
user.surname = user_json.get(\"surname\") user.username = user_json.get(\"username\") user.email", "= request.json error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if error: return jsonify({\"error\": error.message}), 400 else: user", "if (request.method != 'GET' and request.method != 'DELETE') and not request.json: abort(400) def", "User.objects.filter(external_id=user_id, live=True).first() if not user: return jsonify({}), 404 user.live = False user.save() return", "Result: Return all users or one specific user if requested - or access", "flask.views import MethodView from flask import jsonify, request, abort, render_template import uuid import", "Role: Constructor Result: Setup DATA_PER_PAGE and provides basic validation regarding data states Requirements:", "decorators = [app_required] def __init__(self): self.DATA_PER_PAGE=10 if (request.method != 'GET' and request.method !=", "} return jsonify(response), 201 def put(self, user_id): user = UserDB.objects.filter(external_id=user_id, live=True).first() if not", "abort, render_template import uuid import json from jsonschema import Draft4Validator from jsonschema.exceptions import", "'DELETE') and not request.json: abort(400) def get(self, user_id): if user_id: user = User.objects.filter(external_id=user_id,", "Authentication ================================ Name: Delete Parameters: user_id Role: Data modification - logical deletion Result:", "\"ok\", \"user\": user_obj(user) } return jsonify(response), 200 else: return jsonify({}), 404 else: users", "\"/users/?page=%s\" % (users.prev_num), \"rel\": \"previous\" } ) if users.has_next: response[\"links\"].append( { \"href\": \"/users/?page=%s\"", "class for user manipulation - Retrieval, Storage, Update and (Logical) Delete Attributes: DATA_PER_PAGE:", "Update and (Logical) Delete Attributes: DATA_PER_PAGE: Pagination quantification parameter decorators: decorators applied to", "user if requested - or access denied Requirements: 
Authentication ================================ Name: post Parameters:", "denied Requirements: Authentication ================================ Name: Put Parameters: user_id Role: Data modification Result: Return", "jsonify({\"error\": error.message}), 400 else: user.country = user_json.get(\"country\") user.state = user_json.get(\"state\") user.city = user_json.get(\"city\")", "all users or one specific user if requested - or access denied Requirements:", "{ \"result\": \"ok\", \"links\": [ { \"href\": \"/users/?page=%s\" % page, \"rel\": \"self\" }", "= user_json.get(\"password\") user.save() response = { \"result\": \"ok\", \"user\": user.get_object() } return jsonify(response),", "= { \"result\": \"ok\", \"links\": [ { \"href\": \"/users/?page=%s\" % page, \"rel\": \"self\"", "{ \"result\": \"ok\", \"user\": user_obj(user) } return jsonify(response), 201 def put(self, user_id): user", "city = user_json.get(\"city\"), lang = user_json.get(\"lang\"), name = user_json.get(\"name\"), surname = user_json.get(\"surname\"), username", "state = user_json.get(\"state\"), city = user_json.get(\"city\"), lang = user_json.get(\"lang\"), name = user_json.get(\"name\"), surname", "else: user.country = user_json.get(\"country\") user.state = user_json.get(\"state\") user.city = user_json.get(\"city\") user.name = user_json.get(\"name\")", "\"user\": user.get_object() } return jsonify(response), 200 def delete(self, user_id): user = User.objects.filter(external_id=user_id, live=True).first()", "the database Result: Returns the user stored - or access denied Requirements: Authentication", "return jsonify({}), 404 else: users = UserDB.objects.filter(live=True) page = int(request.args.get('page',1)) users = users.paginate(page=page,", "request.method != 'DELETE') and not request.json: abort(400) def get(self, user_id): if user_id: user", "int(request.args.get('page',1)) users = users.paginate(page=page, per_page = self.DATA_PER_PAGE) response = { \"result\": \"ok\", \"links\":", 
"user_json.ger(\"bio\"), live = True, created = datetime.utcnow() ).save() response = { \"result\": \"ok\",", "\"result\": \"ok\", \"user\": user_obj(user) } return jsonify(response), 200 else: return jsonify({}), 404 else:", "best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if error: return jsonify({\"error\": error.message}), 400 else: user = User( external_id=str(uuid.uuid4()), country", "user_json.get(\"password\"), bio = user_json.ger(\"bio\"), live = True, created = datetime.utcnow() ).save() response =", "import app_required from user.models.user import UserDB class UserAPI(MethodView): \"\"\" API class for user", "user: response = { \"result\": \"ok\", \"user\": user_obj(user) } return jsonify(response), 200 else:", "\"href\": \"/users/?page=%s\" % (users.next_num), \"rel\": \"next\" } ) return jsonify(response), 200 def post(self):", "= request.json error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json)) if error: return jsonify({\"error\": error.message}), 400 else: user.country", "delete(self, user_id): user = User.objects.filter(external_id=user_id, live=True).first() if not user: return jsonify({}), 404 user.live" ]
[ "[] for i in l: if i % 2 == 0: s +=", "_ in range(int(input())): l = [*map(int, input().split())] s, m = 0, [] for", "% 2 == 0: s += i m.append(i) m.sort() print(s, end=' ') print(m[0])", "m = 0, [] for i in l: if i % 2 ==", "i in l: if i % 2 == 0: s += i m.append(i)", "in range(int(input())): l = [*map(int, input().split())] s, m = 0, [] for i", "= 0, [] for i in l: if i % 2 == 0:", "l: if i % 2 == 0: s += i m.append(i) m.sort() print(s,", "0, [] for i in l: if i % 2 == 0: s", "[*map(int, input().split())] s, m = 0, [] for i in l: if i", "i % 2 == 0: s += i m.append(i) m.sort() print(s, end=' ')", "for i in l: if i % 2 == 0: s += i", "if i % 2 == 0: s += i m.append(i) m.sort() print(s, end='", "= [*map(int, input().split())] s, m = 0, [] for i in l: if", "l = [*map(int, input().split())] s, m = 0, [] for i in l:", "range(int(input())): l = [*map(int, input().split())] s, m = 0, [] for i in", "input().split())] s, m = 0, [] for i in l: if i %", "<reponame>juseongkr/BOJ for _ in range(int(input())): l = [*map(int, input().split())] s, m = 0,", "s, m = 0, [] for i in l: if i % 2", "for _ in range(int(input())): l = [*map(int, input().split())] s, m = 0, []", "in l: if i % 2 == 0: s += i m.append(i) m.sort()" ]
[ "functools def dict_zip(*dictionaries): common_keys = functools.reduce(lambda x, y: x | y, (set(d.keys()) for", "d for d in dictionaries) } def dict_zip_longest(*dictionaries, fillvalue=None): common_keys = functools.reduce(lambda x,", "in dictionaries), set()) return { key: tuple(d.get(key, fillvalue) for d in dictionaries) for", "{ key: tuple(d[key] for d in dictionaries) for key in common_keys if all(key", "x, y: x | y, (set(d.keys()) for d in dictionaries), set()) return {", "dictionaries) for key in common_keys if all(key in d for d in dictionaries)", "{ key: tuple(d.get(key, fillvalue) for d in dictionaries) for key in common_keys }", "for d in dictionaries) } def dict_zip_longest(*dictionaries, fillvalue=None): common_keys = functools.reduce(lambda x, y:", "} def dict_zip_longest(*dictionaries, fillvalue=None): common_keys = functools.reduce(lambda x, y: x | y, (set(d.keys())", "functools.reduce(lambda x, y: x | y, (set(d.keys()) for d in dictionaries), set()) return", "in dictionaries) for key in common_keys if all(key in d for d in", "d in dictionaries), set()) return { key: tuple(d.get(key, fillvalue) for d in dictionaries)", "y: x | y, (set(d.keys()) for d in dictionaries), set()) return { key:", "in dictionaries) } def dict_zip_longest(*dictionaries, fillvalue=None): common_keys = functools.reduce(lambda x, y: x |", "d in dictionaries), set()) return { key: tuple(d[key] for d in dictionaries) for", "common_keys if all(key in d for d in dictionaries) } def dict_zip_longest(*dictionaries, fillvalue=None):", "in d for d in dictionaries) } def dict_zip_longest(*dictionaries, fillvalue=None): common_keys = functools.reduce(lambda", "common_keys = functools.reduce(lambda x, y: x | y, (set(d.keys()) for d in dictionaries),", "def dict_zip_longest(*dictionaries, fillvalue=None): common_keys = functools.reduce(lambda x, y: x | y, (set(d.keys()) for", "fillvalue=None): common_keys = functools.reduce(lambda x, y: x | y, (set(d.keys()) for d in", "(set(d.keys()) 
for d in dictionaries), set()) return { key: tuple(d.get(key, fillvalue) for d", "all(key in d for d in dictionaries) } def dict_zip_longest(*dictionaries, fillvalue=None): common_keys =", "for d in dictionaries), set()) return { key: tuple(d[key] for d in dictionaries)", "| y, (set(d.keys()) for d in dictionaries), set()) return { key: tuple(d.get(key, fillvalue)", "dictionaries), set()) return { key: tuple(d.get(key, fillvalue) for d in dictionaries) for key", "dictionaries) } def dict_zip_longest(*dictionaries, fillvalue=None): common_keys = functools.reduce(lambda x, y: x | y,", "key: tuple(d[key] for d in dictionaries) for key in common_keys if all(key in", "tuple(d[key] for d in dictionaries) for key in common_keys if all(key in d", "for d in dictionaries) for key in common_keys if all(key in d for", "dict_zip(*dictionaries): common_keys = functools.reduce(lambda x, y: x | y, (set(d.keys()) for d in", "for d in dictionaries), set()) return { key: tuple(d.get(key, fillvalue) for d in", "return { key: tuple(d[key] for d in dictionaries) for key in common_keys if", "import functools def dict_zip(*dictionaries): common_keys = functools.reduce(lambda x, y: x | y, (set(d.keys())", "if all(key in d for d in dictionaries) } def dict_zip_longest(*dictionaries, fillvalue=None): common_keys", "return { key: tuple(d.get(key, fillvalue) for d in dictionaries) for key in common_keys", "key in common_keys if all(key in d for d in dictionaries) } def", "x | y, (set(d.keys()) for d in dictionaries), set()) return { key: tuple(d.get(key,", "y, (set(d.keys()) for d in dictionaries), set()) return { key: tuple(d.get(key, fillvalue) for", "(set(d.keys()) for d in dictionaries), set()) return { key: tuple(d[key] for d in", "dict_zip_longest(*dictionaries, fillvalue=None): common_keys = functools.reduce(lambda x, y: x | y, (set(d.keys()) for d", "set()) return { key: tuple(d.get(key, fillvalue) for d in dictionaries) for key in", "def dict_zip(*dictionaries): common_keys = 
functools.reduce(lambda x, y: x | y, (set(d.keys()) for d", "= functools.reduce(lambda x, y: x | y, (set(d.keys()) for d in dictionaries), set())", "d in dictionaries) } def dict_zip_longest(*dictionaries, fillvalue=None): common_keys = functools.reduce(lambda x, y: x", "in dictionaries), set()) return { key: tuple(d[key] for d in dictionaries) for key", "y, (set(d.keys()) for d in dictionaries), set()) return { key: tuple(d[key] for d", "in common_keys if all(key in d for d in dictionaries) } def dict_zip_longest(*dictionaries,", "set()) return { key: tuple(d[key] for d in dictionaries) for key in common_keys", "| y, (set(d.keys()) for d in dictionaries), set()) return { key: tuple(d[key] for", "dictionaries), set()) return { key: tuple(d[key] for d in dictionaries) for key in", "d in dictionaries) for key in common_keys if all(key in d for d", "for key in common_keys if all(key in d for d in dictionaries) }", "x | y, (set(d.keys()) for d in dictionaries), set()) return { key: tuple(d[key]" ]
[ "http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda:", "# ignore port, URL has priority url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"server\"})) utils.check_val_equal(url, \"http://server:1234\")", "same type for valid comparison, except for ``is_type`` where compare parameter must be", "raise invalid type\") for flag in [\"not_none\", \"not_empty\", \"not_in\", \"not_equal\", \"is_none\", \"is_empty\", \"is_in\",", "evaluate these use cases here. .. seealso:: - :func:`test_verify_param_args_incorrect_usage` for invalid input use-cases", "post to get the 'password' is called in 'login' module # this combination", "utils.check_raises(lambda: ax.evaluate_call(lambda: int, fallback=int), # noqa HTTPInternalServerError, msg=\"invalid callable non-lambda 'fallback' should raise\")", "is_type=True) ax.verify_param(\"x\", param_compare=six.string_types, is_type=True) ax.verify_param(\"x\", param_compare=str, is_type=True) ax.verify_param(\"x\", param_compare=\"y\", not_equal=True) ax.verify_param(\"x\", param_compare=\"x\", is_equal=True)", "url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"localhost\"})) utils.check_val_equal(url,", "setUpClass(cls): cls.version = __meta__.__version__ # only local test def test_magpie_prefix_direct_request(self): base_url = \"http://localhost\"", "instance while value provided is not utils.check_raises(lambda: ax.verify_param(1, param_compare=1, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1,", "True) def test_verify_param_proper_verifications_raised(self): # with default 
error utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=True), HTTPBadRequest)", "did not really get tested utils.check_val_is_in(mock_calls[\"counter\"], list(range(2, ax.RAISE_RECURSIVE_SAFEGUARD_MAX + 1))) # noqa def", "format expected as JSON serializable should raise\" ) def test_generate_response_http_format_invalid_usage(self): utils.check_raises( lambda: ax.generate_response_http_format(None,", "\"b\"]), HTTPInternalServerError, msg=\"missing any flag specification should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"],", "called with above 'test_request' should catch the final 'HTTPInternalServerError' that is # raised", "# avoid raising forever if the real safeguard fails doing its job if", "the safeguard did not do its job # if it did not get", "the raised error will itself # call 'raise_http' again each time operation fails,", "import six from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPInternalServerError, HTTPOk from pyramid.settings import asbool", "is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=1, not_in=True), HTTPInternalServerError) utils.check_raises(lambda:", "raise\" ) def test_guess_target_format_default(self): request = utils.mock_request() content_type, where = ag.guess_target_format(request) utils.check_val_equal(content_type, CONTENT_TYPE_JSON)", "is_type=True) ax.verify_param(\"x\", param_compare=str, is_type=True) ax.verify_param(\"x\", param_compare=\"y\", not_equal=True) ax.verify_param(\"x\", param_compare=\"x\", is_equal=True) ax.verify_param(True, is_true=True) ax.verify_param(False,", "utils.check_raises(lambda: ax.verify_param(1, param_compare=int, is_equal=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, is_equal=True), 
HTTPInternalServerError) # compare flags", "end up into an endless recursive call stack of raised errors. \"\"\" mock_calls", "HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int, is_type=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1.0, param_compare=six.string_types, is_type=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\",", ">= 2 * ax.RAISE_RECURSIVE_SAFEGUARD_MAX: return TypeError() mock_calls[\"counter\"] += 1 raise TypeError() def mock_lambda_call(*_,", "ax.RAISE_RECURSIVE_SAFEGUARD_MAX + 1))) # noqa def test_format_content_json_str_invalid_usage(self): non_json_serializable_content = {\"key\": HTTPInternalServerError()} utils.check_raises( lambda:", "\"value\") utils.check_val_equal(v, None) resp = utils.mock_request(\"/some/path?other=test\") v = ar.get_query_param(resp, \"value\", True) utils.check_val_equal(v, True)", "'evaluate_call' itself raising to # trigger 'mock_raise' recursively within 'raise_http' function. # Since", "in [\"not_none\", \"not_empty\", \"not_in\", \"not_equal\", \"is_none\", \"is_empty\", \"is_in\", \"is_equal\", \"is_true\", \"is_false\", \"is_type\", \"matches\"]:", "with invalid credentials will call \"/signin\" followed by sub-request \"/signin_internal\" and finally \"ZigguratSignInBadAuth\".", "signin route and another on the redirected internal login _paths = [\"/signin\", \"/signin_internal\"]", "flags expecting param_compare to be a type while value provided is not utils.check_raises(lambda:", "\"\"\" # compare flags expecting a value (can only consider it bad request", "``param`` and ``param_compare`` must be of same type for valid comparison, except for", "base_url + path, \"Proxied path should have been auto-resolved [URL: {}].\".format(url)) def test_magpie_prefix_request_with_multiple_route_url(self):", "a controlled fashion. 
Therefore, error to be raised is an 'expected' validation failure", "because comparison values are valid) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=1, is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=True,", "have been auto-resolved [URL: {}].\".format(url)) return real_func(request, *args, **kwargs) for url in [\"http://localhost\",", "\"localhost:9871\"}): url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") # settings priority over envs", "# URL fixed with missing scheme even if defined with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\",", "raise\" ) def test_generate_response_http_format_invalid_usage(self): utils.check_raises( lambda: ax.generate_response_http_format(None, {}, {}, \"\", {}), # noqa", "raised error will itself # call 'raise_http' again each time operation fails, creating", "generated by any test app with mock.patch.object(constants, \"MAGPIE_URL\", None): with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\"}):", "not_none=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(1, is_none=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True, http_error=HTTPForbidden), HTTPForbidden)", "\"magpie.port\": \"1234\"})) utils.check_val_equal(url, \"http://test.com:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"1234\"})) utils.check_val_equal(url, \"http://localhost:1234\") url =", "#!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\" test_utils ---------------------------------- Tests for the", "[\"value-1\", \"value-2\"], any_order=True) def test_enum_get_by_value(self): utils.check_val_equal(DummyEnum.get(\"value-1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"VALUE1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"random\"), 
None) utils.check_val_equal(DummyEnum.get(\"random\", \"something\"),", "of comparison between values. We evaluate these use cases here. .. seealso:: -", "msg=\"invalid callable non-lambda 'call' should raise\") utils.check_raises(lambda: ax.evaluate_call(lambda: int, fallback=int), # noqa HTTPInternalServerError,", "\"/signin_internal\"] app = utils.get_test_magpie_app({\"magpie.url\": url}) with mock.patch(\"magpie.api.requests.get_value_multiformat_body_checked\", side_effect=lambda *_, **__: mock_get_post(real_multiform_post_checked, *_, **__)):", "error'. \"\"\" utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"]), HTTPInternalServerError, msg=\"missing any flag specification should be", "utils.check_raises(lambda: ax.verify_param(True, is_false=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(None, not_none=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, is_none=True), HTTPBadRequest) utils.check_raises(lambda:", "stack of raised errors. \"\"\" mock_calls = {\"counter\": 0} def mock_raise(*_, **__): #", "= utils.get_test_magpie_app() with mock.patch(\"magpie.api.exception.generate_response_http_format\", side_effect=mock_raise): with mock.patch(\"magpie.api.login.login.get_session\", side_effect=mock_lambda_call): # Call request that ends", "for the various utility operations employed by Magpie. 
\"\"\" import os import unittest", "recursive safeguard does its job, it should end up raising 'HTTPInternalServerError' directly #", "import runner, utils class DummyEnum(ExtendedEnum): VALUE1 = \"value-1\" VALUE2 = \"value-2\" @runner.MAGPIE_TEST_LOCAL @runner.MAGPIE_TEST_UTILS", "\"Proxied path should have been auto-resolved [URL: {}].\".format(url)) def test_magpie_prefix_request_with_multiple_route_url(self): \"\"\" Test multiple", "http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(True, is_false=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(None, not_none=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda:", "resulting in error during response generation should raise\" ) def test_guess_target_format_default(self): request =", "param_compare=r\"[A-Z]+\", http_error=HTTPForbidden), HTTPForbidden) def test_verify_param_proper_verifications_passed(self): ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], not_in=True) ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], is_in=True)", "use \"get_multiformat_body\". \"\"\" from magpie.api.requests import get_value_multiformat_body_checked as real_multiform_post_checked base_url = \"http://localhost\" def", "internal server error instead of 'normal HTTP error'. \"\"\" utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"]),", "from .env when running all tests. 
# Always need to provide a settings", "\"MAGPIE_PORT\": \"1234\"}): url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") # ignore port, URL", "\"\"\" mock_calls = {\"counter\": 0} def mock_raise(*_, **__): # avoid raising forever if", "url in [\"http://localhost\", \"http://localhost/magpie\"]: app = utils.get_test_magpie_app({\"magpie.url\": url}) path = \"/version\" resp =", "{\"Content-Type\": \"{}; charset=UTF-8\".format(CONTENT_TYPE_JSON)} for name in [\"content_type\", \"content-type\", \"Content_Type\", \"Content-Type\", \"CONTENT_TYPE\", \"CONTENT-TYPE\"]: for", "needing it should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"b\"], not_in=True, http_error=HTTPOk), # noqa HTTPInternalServerError,", "utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\", http_error=HTTPForbidden), HTTPForbidden) def test_verify_param_proper_verifications_passed(self): ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], not_in=True) ax.verify_param(\"b\",", "via 'evaluate_call' itself raising to # trigger 'mock_raise' recursively within 'raise_http' function. 
#", "unittest from distutils.version import LooseVersion import mock import six from pyramid.httpexceptions import HTTPBadRequest,", "+ path, \"Proxied path should have been auto-resolved [URL: {}].\".format(url)) def test_magpie_prefix_request_with_multiple_route_url(self): \"\"\"", "noqa def test_format_content_json_str_invalid_usage(self): non_json_serializable_content = {\"key\": HTTPInternalServerError()} utils.check_raises( lambda: ax.format_content_json_str(200, \"\", non_json_serializable_content, CONTENT_TYPE_JSON),", "ax.evaluate_call(int), HTTPInternalServerError, msg=\"invalid callable non-lambda 'call' should raise\") utils.check_raises(lambda: ax.evaluate_call(lambda: int, fallback=int), #", "URL has priority url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"server\"})) utils.check_val_equal(url, \"http://server:1234\") url = utils.check_no_raise(lambda:", "for url in [\"http://localhost\", \"http://localhost/magpie\"]: # paths are reduced (pop in mock) each", "mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\", \"MAGPIE_PORT\": \"1234\"}): url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") #", "should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=None), # noqa HTTPInternalServerError, msg=\"flag specified", "TypeError() def mock_lambda_call(*_, **__): ax.evaluate_call(lambda: int(\"x\")) try: app = utils.get_test_magpie_app() with mock.patch(\"magpie.api.exception.generate_response_http_format\", side_effect=mock_raise):", "each time a post to get the 'password' is called in 'login' module", "define in settings), # otherwise 'get_constant' can find the current thread settings generated", "detect incorrect input utils.check_raises(lambda: ax.verify_param(1, param_compare=int, is_equal=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", 
param_compare=str, is_equal=True), HTTPInternalServerError)", "test_format_content_json_str_invalid_usage(self): non_json_serializable_content = {\"key\": HTTPInternalServerError()} utils.check_raises( lambda: ax.format_content_json_str(200, \"\", non_json_serializable_content, CONTENT_TYPE_JSON), HTTPInternalServerError, msg=\"invalid", "test_generate_response_http_format_invalid_usage(self): utils.check_raises( lambda: ax.generate_response_http_format(None, {}, {}, \"\", {}), # noqa HTTPInternalServerError, msg=\"invalid arguments", "HTTP error'. \"\"\" utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"]), HTTPInternalServerError, msg=\"missing any flag specification should", "called in 'login' module # this combination should happen twice, one in signin", "# Always need to provide a settings container (even empty direct when nothing", "route and another on the redirected internal login _paths = [\"/signin\", \"/signin_internal\"] app", "a settings container (even empty direct when nothing define in settings), # otherwise", "utils.mock_request(\"/some/path?QUERY=VALUE\") v = asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v, False) resp = utils.mock_request(\"/some/path?Query=TRUE\") v = asbool(ar.get_query_param(resp,", "for ``is_type`` where compare parameter must be the type directly. .. 
versionchanged:: 2.0", "# invalid username/password credentials utils.check_response_basic_info(resp, expected_code=401, expected_method=\"POST\") def test_get_header_split(self): headers = {\"Content-Type\": \"{};", "http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\", http_error=HTTPForbidden), HTTPForbidden) def test_verify_param_proper_verifications_passed(self): ax.verify_param(\"x\", param_compare=[\"a\", \"b\"],", "def test_verify_param_compare_types(self): \"\"\" Arguments ``param`` and ``param_compare`` must be of same type for", "'call' should raise\") utils.check_raises(lambda: ax.evaluate_call(lambda: int, fallback=int), # noqa HTTPInternalServerError, msg=\"invalid callable non-lambda", "flag needing it should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"b\"], not_in=True, http_error=HTTPOk), # noqa", "{\"MAGPIE_URL\": \"\", \"MAGPIE_PORT\": \"1234\"}): url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") # ignore", "error during response generation should raise\" ) def test_guess_target_format_default(self): request = utils.mock_request() content_type,", "param_compare=1, not_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=list, not_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, not_in=True), HTTPInternalServerError)", "= ar.get_query_param(resp, \"value\", True) utils.check_val_equal(v, \"test\") resp = utils.mock_request(\"/some/path?query=value\") v = ar.get_query_param(resp, \"query\")", "HTTPForbidden) utils.check_raises(lambda: ax.verify_param(True, is_false=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(None, not_none=True, http_error=HTTPForbidden), HTTPForbidden) 
utils.check_raises(lambda: ax.verify_param(1,", "mock.patch(\"magpie.api.exception.generate_response_http_format\", side_effect=mock_raise): with mock.patch(\"magpie.api.login.login.get_session\", side_effect=mock_lambda_call): # Call request that ends up calling the", "where compare parameter must be the type directly. .. versionchanged:: 2.0 Since ``param``", "\"http://test.com:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"1234\"})) utils.check_val_equal(url, \"http://localhost:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"9000\",", "found if not in settings url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"server\"})) # ignored, URL", "= utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"localhost\"})) utils.check_val_equal(url, \"http://localhost:2001\")", "test_get_header_split(self): headers = {\"Content-Type\": \"{}; charset=UTF-8\".format(CONTENT_TYPE_JSON)} for name in [\"content_type\", \"content-type\", \"Content_Type\", \"Content-Type\",", "get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") # ignore port, URL has priority url = utils.check_no_raise(lambda:", "URL priority utils.check_val_equal(url, \"http://localhost:9871\") # URL fixed with missing scheme even if defined", "utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test-server.com\"})) utils.check_val_equal(url, \"http://test-server.com:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test.com\", \"magpie.port\": \"1234\"})) utils.check_val_equal(url,", "be a type. 
Inversely, ``param_compare`` must not be a type if ``is_type`` is", "up to format all response prior to return, the raised error will itself", "is_true=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(True, is_false=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(None, not_none=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, is_none=True),", "when compare flags expect a value but type is provided, should still detect", "\"/version\" resp = utils.test_request(app, \"GET\", path) utils.check_response_basic_info(resp) utils.check_val_equal(resp.request.url, base_url + path, \"Proxied path", "\"1234\"})) utils.check_val_equal(url, \"http://localhost:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"9000\", \"magpie.scheme\": \"https\"})) utils.check_val_equal(url, \"https://localhost:9000\") with", "ax.verify_param(\"x\", param_compare=str, is_type=True) ax.verify_param(\"x\", param_compare=\"y\", not_equal=True) ax.verify_param(\"x\", param_compare=\"x\", is_equal=True) ax.verify_param(True, is_true=True) ax.verify_param(False, is_false=True)", "parameter must be the type directly. .. versionchanged:: 2.0 Since ``param`` can come", "call stack of raised errors. 
\"\"\" mock_calls = {\"counter\": 0} def mock_raise(*_, **__):", "True) def test_get_magpie_url_defined_or_defaults(self): # Disable constants globals() for every case, since it can", "ar from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url from tests import runner, utils", "int(\"x\")) try: app = utils.get_test_magpie_app() with mock.patch(\"magpie.api.exception.generate_response_http_format\", side_effect=mock_raise): with mock.patch(\"magpie.api.login.login.get_session\", side_effect=mock_lambda_call): # Call", "url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test.com\", \"magpie.port\": \"1234\"})) utils.check_val_equal(url, \"http://test.com:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\":", "mock_calls = {\"counter\": 0} def mock_raise(*_, **__): # avoid raising forever if the", "default error utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], is_in=True),", "\"content-type\", \"Content_Type\", \"Content-Type\", \"CONTENT_TYPE\", \"CONTENT-TYPE\"]: for split in [\";,\", \",;\", \";\", (\",\", \";\"),", "request, args = args[0], args[1:] utils.check_val_equal(request.url, base_url + _paths.pop(0), \"Proxied path should have", "is_equal=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, is_equal=True), HTTPInternalServerError) # compare flags expecting param_compare to", "LooseVersion import mock import six from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPInternalServerError, HTTPOk from", "not_in=True, http_error=HTTPOk), # noqa HTTPInternalServerError, msg=\"incorrect HTTP class to raise error should be", "http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(1, is_none=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: 
ax.verify_param(\"\", not_empty=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda:", "\"/signin\" followed by sub-request \"/signin_internal\" and finally \"ZigguratSignInBadAuth\". Both \"/signin\" and \"ZigguratSignInBadAuth\" use", "pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPInternalServerError, HTTPOk from pyramid.settings import asbool from magpie import", "caught\") utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=None), # noqa HTTPInternalServerError, msg=\"flag specified with incorrect", "**{flag: 1}), HTTPInternalServerError, msg=\"invalid flag '{}' type should be caught\".format(flag)) def test_verify_param_compare_types(self): \"\"\"", "{}].\".format(url)) def test_magpie_prefix_request_with_multiple_route_url(self): \"\"\" Test multiple request routing with fixed \"MAGPIE_URL\" within the", "ax.verify_param(\"1\", param_compare=str, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=1, not_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=list, not_in=True),", "``param_compare`` must be of same type for valid comparison, except for ``is_type`` where", "env URL found if not in settings url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"server\"})) #", "Since tweens are set up to format all response prior to return, the", "utils.check_val_equal(url, \"http://localhost:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test-server.com\"})) utils.check_val_equal(url, \"http://test-server.com:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\":", "\"http://localhost:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test-server.com\"})) utils.check_val_equal(url, \"http://test-server.com:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test.com\",", "noqa 
HTTPInternalServerError, msg=\"incorrect HTTP class to raise error should be caught\") utils.check_raises(lambda: ax.verify_param([1],", "non-lambda 'fallback' should raise\") def test_evaluate_call_recursive_safeguard(self): \"\"\" Validate use case if internal function", "{\"counter\": 0} def mock_raise(*_, **__): # avoid raising forever if the real safeguard", "get tested utils.check_val_is_in(mock_calls[\"counter\"], list(range(2, ax.RAISE_RECURSIVE_SAFEGUARD_MAX + 1))) # noqa def test_format_content_json_str_invalid_usage(self): non_json_serializable_content =", "formatting and generation of a resulting HTTP response raises itself an error (because", "use-cases \"\"\" # compare flags expecting a value (can only consider it bad", "ax.verify_param(1, param_compare=1, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=list, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, is_in=True),", "\"VALUE\") resp = utils.mock_request(\"/some/path?QUERY=VALUE\") v = asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v, False) resp = utils.mock_request(\"/some/path?Query=TRUE\")", "value provided is not utils.check_raises(lambda: ax.verify_param(1, param_compare=\"x\", is_type=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_type=True),", "operations employed by Magpie. 
\"\"\" import os import unittest from distutils.version import LooseVersion", "not_in=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], is_in=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int,", "@runner.MAGPIE_TEST_LOCAL @runner.MAGPIE_TEST_UTILS class TestUtils(unittest.TestCase): @classmethod def setUpClass(cls): cls.version = __meta__.__version__ # only local", "comparison values are valid) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=1, is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=True, is_equal=True),", "# (without further formatting attempt when reaching the MAX value), stopping the endless", "asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v, True) def test_verify_param_proper_verifications_raised(self): # with default error utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\",", "up raising 'HTTPInternalServerError' directly # (without further formatting attempt when reaching the MAX", "utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\", http_error=HTTPForbidden), HTTPForbidden) def test_verify_param_proper_verifications_passed(self):", "(pop in mock) each time a post to get the 'password' is called", "HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\"), HTTPBadRequest) # with requested", "url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"1234\"})) utils.check_val_equal(url, \"http://localhost:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"9000\", \"magpie.scheme\":", "int, 
fallback=int), # noqa HTTPInternalServerError, msg=\"invalid callable non-lambda 'fallback' should raise\") def test_evaluate_call_recursive_safeguard(self):", "in [\"http://localhost\", \"http://localhost/magpie\"]: app = utils.get_test_magpie_app({\"magpie.url\": url}) path = \"/version\" resp = utils.test_request(app,", "= utils.mock_request(\"/some/path?other=test\") v = ar.get_query_param(resp, \"value\") utils.check_val_equal(v, None) resp = utils.mock_request(\"/some/path?other=test\") v =", "import __meta__, constants from magpie.api import exception as ax from magpie.api import generic", "\"http://localhost/magpie\"]: # paths are reduced (pop in mock) each time a post to", "expecting param_compare to be some container instance while value provided is not utils.check_raises(lambda:", "\"\"\" Test multiple request routing with fixed \"MAGPIE_URL\" within the API application. Signin", "# if our counter reached higher than the MAX (i.e.: 2*MAX from mock),", "HTTPForbidden) utils.check_raises(lambda: ax.verify_param(None, not_none=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(1, is_none=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"\",", "raising 'HTTPInternalServerError' directly # (without further formatting attempt when reaching the MAX value),", "split in [\";,\", \",;\", \";\", (\",\", \";\"), [\";\", \",\"]]: utils.check_val_equal(get_header(name, headers, split=split), CONTENT_TYPE_JSON)", "raise) utils.check_no_raise(lambda: ax.verify_param(\"1\", param_compare=\"1\", is_equal=True)) def test_enum_values_listing(self): utils.check_all_equal(DummyEnum.values(), [\"value-1\", \"value-2\"], any_order=True) def test_enum_get_by_value(self):", "param_compare=[\"a\", \"b\"], is_in=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int, is_type=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1.0, 
param_compare=six.string_types, is_type=True),", "test_enum_values_listing(self): utils.check_all_equal(DummyEnum.values(), [\"value-1\", \"value-2\"], any_order=True) def test_enum_get_by_value(self): utils.check_val_equal(DummyEnum.get(\"value-1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"VALUE1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"random\"), None)", "def test_enum_other(self): class OtherEnum(ExtendedEnum): VALUE1 = DummyEnum.VALUE1.value # copy internal string representation utils.check_val_not_equal(DummyEnum.VALUE1,", "mock_calls[\"counter\"] += 1 raise TypeError() def mock_lambda_call(*_, **__): ax.evaluate_call(lambda: int(\"x\")) try: app =", "in a controlled fashion. Therefore, error to be raised is an 'expected' validation", "should end up raising 'HTTPInternalServerError' directly # (without further formatting attempt when reaching", "\"b\"], not_in=None), # noqa HTTPInternalServerError, msg=\"flag specified with incorrect type should be caught\")", "\"something\") def test_enum_other(self): class OtherEnum(ExtendedEnum): VALUE1 = DummyEnum.VALUE1.value # copy internal string representation", "be caught\".format(flag)) def test_verify_param_compare_types(self): \"\"\" Arguments ``param`` and ``param_compare`` must be of same", "cases handled correctly (no raise) utils.check_no_raise(lambda: ax.verify_param(\"1\", param_compare=\"1\", is_equal=True)) def test_enum_values_listing(self): utils.check_all_equal(DummyEnum.values(), [\"value-1\",", "\"https://localhost:9000\") with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"localhost:9871\"}): url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") #", "it does not end up into an endless recursive call stack of raised", "http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], is_in=True, 
http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int, is_type=True,", "{}, {}, \"\", {}), # noqa HTTPInternalServerError, msg=\"invalid arguments resulting in error during", "param_compare=\"y\", not_equal=True) ax.verify_param(\"x\", param_compare=\"x\", is_equal=True) ax.verify_param(True, is_true=True) ax.verify_param(False, is_false=True) ax.verify_param(1, not_none=True) ax.verify_param(None, is_none=True)", "constants globals() for every case, since it can pre-loaded from .env when running", "type if ``is_type`` is not requested, but other flags require some form of", "response formatter via 'evaluate_call' itself raising to # trigger 'mock_raise' recursively within 'raise_http'", "**kwargs): if args[1] != \"password\": return real_func(*args, **kwargs) request, args = args[0], args[1:]", "constants from magpie.api import exception as ax from magpie.api import generic as ag", "error utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], is_in=True), HTTPBadRequest)", "ax.verify_param(\"x\", param_compare=\"x\", not_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\", is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(False, is_true=True), HTTPBadRequest)", "is an 'expected' validation failure (``HTTPBadRequest`` or whichever ``http_error`` provided) instead of runtime", "HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(False, is_true=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(True, is_false=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(None, not_none=True), HTTPBadRequest)", "[\";\", \",\"]]: utils.check_val_equal(get_header(name, headers, split=split), CONTENT_TYPE_JSON) def test_get_query_param(self): resp = utils.mock_request(\"/some/path\") v 
=", "must not be a type if ``is_type`` is not requested, but other flags", "come from user input, we should **NOT** raise ``HTTPInternalServerError`` because the whole point", "from magpie.api import requests as ar from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url", "msg=\"missing 'param_compare' for flag needing it should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"b\"], not_in=True,", "ignored, URL priority utils.check_val_equal(url, \"http://localhost:9871\") # URL fixed with missing scheme even if", "of the method is to ensure that values are compared accordingly in a", "\"b\"], not_in=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], is_in=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"1\",", "paths are reduced (pop in mock) each time a post to get the", "from magpie.api import exception as ax from magpie.api import generic as ag from", "ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], is_in=True) ax.verify_param(1, param_compare=int, is_type=True) ax.verify_param(\"x\", param_compare=six.string_types, is_type=True) ax.verify_param(\"x\", param_compare=str, is_type=True)", "ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], is_in=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\",", "resp = utils.test_request(app, \"GET\", path) utils.check_response_basic_info(resp) utils.check_val_equal(resp.request.url, base_url + path, \"Proxied path should", "*_, **__: mock_get_post(real_multiform_post_checked, *_, **__)): data = {\"user_name\": \"foo\", \"password\": \"<PASSWORD>\"} headers =", "not_in=None), # noqa HTTPInternalServerError, msg=\"flag specified with incorrect type should be caught\") utils.check_raises(lambda:", "directly. .. 
versionchanged:: 2.0 Since ``param`` can come from user input, we should", "\"is_equal\", \"is_true\", \"is_false\", \"is_type\", \"matches\"]: utils.check_raises(lambda: ax.verify_param(\"x\", **{flag: 1}), HTTPInternalServerError, msg=\"invalid flag '{}'", "def mock_raise(*_, **__): # avoid raising forever if the real safeguard fails doing", "container (even empty direct when nothing define in settings), # otherwise 'get_constant' can", "utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") url =", "use case if internal function that handles formatting and generation of a resulting", "msg=\"invalid arguments resulting in error during response generation should raise\" ) def test_guess_target_format_default(self):", "even if defined with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\", \"MAGPIE_PORT\": \"1234\"}): url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\":", "local test def test_magpie_prefix_direct_request(self): base_url = \"http://localhost\" for url in [\"http://localhost\", \"http://localhost/magpie\"]: app", "ax.evaluate_call(lambda: int, fallback=int), # noqa HTTPInternalServerError, msg=\"invalid callable non-lambda 'fallback' should raise\") def", "should not raise: {}\".format(exc)) # if our counter reached higher than the MAX", "be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", not_in=True), HTTPInternalServerError, msg=\"missing 'param_compare' for flag needing it should", "sub-request \"/signin_internal\" and finally \"ZigguratSignInBadAuth\". Both \"/signin\" and \"ZigguratSignInBadAuth\" use \"get_multiformat_body\". 
\"\"\" from", "should be different\") def test_evaluate_call_callable_incorrect_usage(self): \"\"\" Verifies that incorrect usage of utility is", "= \"value-1\" VALUE2 = \"value-2\" @runner.MAGPIE_TEST_LOCAL @runner.MAGPIE_TEST_UTILS class TestUtils(unittest.TestCase): @classmethod def setUpClass(cls): cls.version", "import HTTPBadRequest, HTTPForbidden, HTTPInternalServerError, HTTPOk from pyramid.settings import asbool from magpie import __meta__,", "# noqa def test_format_content_json_str_invalid_usage(self): non_json_serializable_content = {\"key\": HTTPInternalServerError()} utils.check_raises( lambda: ax.format_content_json_str(200, \"\", non_json_serializable_content,", "ax.verify_param(\"x\", **{flag: 1}), HTTPInternalServerError, msg=\"invalid flag '{}' type should be caught\".format(flag)) def test_verify_param_compare_types(self):", "but other flags require some form of comparison between values. We evaluate these", "path, \"Proxied path should have been auto-resolved [URL: {}].\".format(url)) def test_magpie_prefix_request_with_multiple_route_url(self): \"\"\" Test", "utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=None), # noqa HTTPInternalServerError, msg=\"flag specified with incorrect type", "utils.check_val_equal(get_header(name, headers, split=split), CONTENT_TYPE_JSON) def test_get_query_param(self): resp = utils.mock_request(\"/some/path\") v = ar.get_query_param(resp, \"value\")", "not_in=True) ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], is_in=True) ax.verify_param(1, param_compare=int, is_type=True) ax.verify_param(\"x\", param_compare=six.string_types, is_type=True) ax.verify_param(\"x\", param_compare=str,", "errors. 
\"\"\" mock_calls = {\"counter\": 0} def mock_raise(*_, **__): # avoid raising forever", "ax.verify_param(\"x\", param_compare=\"y\", is_equal=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(False, is_true=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(True, is_false=True,", "\"/session\", expect_errors=True) except AssertionError: # Request called with above 'test_request' should catch the", "invalid username/password credentials utils.check_response_basic_info(resp, expected_code=401, expected_method=\"POST\") def test_get_header_split(self): headers = {\"Content-Type\": \"{}; charset=UTF-8\".format(CONTENT_TYPE_JSON)}", "auto-resolved [URL: {}].\".format(url)) return real_func(request, *args, **kwargs) for url in [\"http://localhost\", \"http://localhost/magpie\"]: #", "(no raise) utils.check_no_raise(lambda: ax.verify_param(\"1\", param_compare=\"1\", is_equal=True)) def test_enum_values_listing(self): utils.check_all_equal(DummyEnum.values(), [\"value-1\", \"value-2\"], any_order=True) def", "url = utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:9871\") # env URL found if not in", "param_compare=[\"a\", \"b\"], is_in=True) ax.verify_param(1, param_compare=int, is_type=True) ax.verify_param(\"x\", param_compare=six.string_types, is_type=True) ax.verify_param(\"x\", param_compare=str, is_type=True) ax.verify_param(\"x\",", "That error is again re-raised as 'AssertionError' pass except Exception as exc: self.fail(\"unexpected", "utils.mock_request(\"/some/path?other=test\") v = ar.get_query_param(resp, \"value\", True) utils.check_val_equal(v, True) resp = utils.mock_request(\"/some/path?value=test\") v =", "loop. 
utils.test_request(app, \"GET\", \"/session\", expect_errors=True) except AssertionError: # Request called with above 'test_request'", "http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int, is_type=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\", not_equal=True, http_error=HTTPForbidden),", "is_type=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\", not_equal=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\", is_equal=True,", "utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], is_in=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int, is_type=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1.0,", "when nothing define in settings), # otherwise 'get_constant' can find the current thread", "utils.check_val_equal(v, True) def test_verify_param_proper_verifications_raised(self): # with default error utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=True),", "ax.evaluate_call(lambda: int(\"x\")) try: app = utils.get_test_magpie_app() with mock.patch(\"magpie.api.exception.generate_response_http_format\", side_effect=mock_raise): with mock.patch(\"magpie.api.login.login.get_session\", side_effect=mock_lambda_call): #", "utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], is_in=True, http_error=HTTPForbidden),", "not requested, but other flags require some form of comparison between values. 
We", "utils.check_val_equal(v, None) resp = utils.mock_request(\"/some/path?other=test\") v = ar.get_query_param(resp, \"value\", True) utils.check_val_equal(v, True) resp", "test_verify_param_args_incorrect_usage(self): \"\"\" Invalid usage of function raises internal server error instead of 'normal", "trigger 'mock_raise' recursively within 'raise_http' function. # Since tweens are set up to", "'HTTPInternalServerError' directly # (without further formatting attempt when reaching the MAX value), stopping", "for split in [\";,\", \",;\", \";\", (\",\", \";\"), [\";\", \",\"]]: utils.check_val_equal(get_header(name, headers, split=split),", "than the MAX (i.e.: 2*MAX from mock), the safeguard did not do its", "its job, it should end up raising 'HTTPInternalServerError' directly # (without further formatting", "test_evaluate_call_callable_incorrect_usage(self): \"\"\" Verifies that incorrect usage of utility is raised accordingly. \"\"\" utils.check_raises(lambda:", "= utils.mock_request(\"/some/path\") v = ar.get_query_param(resp, \"value\") utils.check_val_equal(v, None) resp = utils.mock_request(\"/some/path?other=test\") v =", "is_false=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(None, not_none=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(1, is_none=True, http_error=HTTPForbidden), HTTPForbidden)", "we know that ``param_compare`` must be a type. 
Inversely, ``param_compare`` must not be", "generic as ag from magpie.api import requests as ar from magpie.utils import CONTENT_TYPE_JSON,", "exist utils.check_response_basic_info(resp, expected_code=406, expected_method=\"POST\") else: # invalid username/password credentials utils.check_response_basic_info(resp, expected_code=401, expected_method=\"POST\") def", "HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\", http_error=HTTPForbidden), HTTPForbidden) def", "'login' module # this combination should happen twice, one in signin route and", "msg=\"missing any flag specification should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=None), #", "ax.verify_param(1, param_compare=int, is_equal=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, is_equal=True), HTTPInternalServerError) # compare flags expecting", "the current thread settings generated by any test app with mock.patch.object(constants, \"MAGPIE_URL\", None):", "incorrect type should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", not_in=True), HTTPInternalServerError, msg=\"missing 'param_compare' for flag", "mock), the safeguard did not do its job # if it did not", "utils.check_val_equal(v, \"VALUE\") resp = utils.mock_request(\"/some/path?QUERY=VALUE\") v = asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v, False) resp =", "method is to ensure that values are compared accordingly in a controlled fashion.", "usage of function raises internal server error instead of 'normal HTTP error'. 
\"\"\"", "of implementation issue), while it is processing another pre-raised error, that it does", "msg=\"invalid content format expected as JSON serializable should raise\" ) def test_generate_response_http_format_invalid_usage(self): utils.check_raises(", "\"https://test-server.com\") # ignore port, URL has priority url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"server\"})) utils.check_val_equal(url,", "``param_compare`` must not be a type if ``is_type`` is not requested, but other", "if LooseVersion(self.version) < LooseVersion(\"0.10.0\"): # user name doesn't exist utils.check_response_basic_info(resp, expected_code=406, expected_method=\"POST\") else:", "import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url from tests import runner, utils class DummyEnum(ExtendedEnum): VALUE1", "because the whole point of the method is to ensure that values are", "\"http://localhost:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"localhost\"}))", "ax.verify_param(\"b\", not_in=True), HTTPInternalServerError, msg=\"missing 'param_compare' for flag needing it should be caught\") utils.check_raises(lambda:", "since it can pre-loaded from .env when running all tests. # Always need", "param_compare to be some container instance while value provided is not utils.check_raises(lambda: ax.verify_param(1,", "reaching the MAX value), stopping the endless loop. 
utils.test_request(app, \"GET\", \"/session\", expect_errors=True) except", "test_get_query_param(self): resp = utils.mock_request(\"/some/path\") v = ar.get_query_param(resp, \"value\") utils.check_val_equal(v, None) resp = utils.mock_request(\"/some/path?other=test\")", "should be caught\".format(flag)) def test_verify_param_compare_types(self): \"\"\" Arguments ``param`` and ``param_compare`` must be of", "http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(None, not_none=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(1, is_none=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda:", "whole point of the method is to ensure that values are compared accordingly", "= utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") url", "'mock_raise' recursively within 'raise_http' function. 
# Since tweens are set up to format", "__meta__, constants from magpie.api import exception as ax from magpie.api import generic as", "ax.format_content_json_str(200, \"\", non_json_serializable_content, CONTENT_TYPE_JSON), HTTPInternalServerError, msg=\"invalid content format expected as JSON serializable should", "(without further formatting attempt when reaching the MAX value), stopping the endless loop.", "CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url from tests import runner, utils class DummyEnum(ExtendedEnum): VALUE1 =", "= utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"9000\", \"magpie.scheme\": \"https\"})) utils.check_val_equal(url, \"https://localhost:9000\") with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"localhost:9871\"}): url", "\"foo\", \"password\": \"<PASSWORD>\"} headers = {\"Content-Type\": CONTENT_TYPE_JSON, \"Accept\": CONTENT_TYPE_JSON} resp = utils.test_request(app, \"POST\",", "2 * ax.RAISE_RECURSIVE_SAFEGUARD_MAX: return TypeError() mock_calls[\"counter\"] += 1 raise TypeError() def mock_lambda_call(*_, **__):", "\"http://localhost\" def mock_get_post(real_func, *args, **kwargs): if args[1] != \"password\": return real_func(*args, **kwargs) request,", "url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"server\"})) utils.check_val_equal(url, \"http://server:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.scheme\": \"https\"})) utils.check_val_equal(url,", "expected_code=406, expected_method=\"POST\") else: # invalid username/password credentials utils.check_response_basic_info(resp, expected_code=401, expected_method=\"POST\") def test_get_header_split(self): headers", "Both \"/signin\" and \"ZigguratSignInBadAuth\" use \"get_multiformat_body\". 
\"\"\" from magpie.api.requests import get_value_multiformat_body_checked as real_multiform_post_checked", "callable non-lambda 'fallback' should raise\") def test_evaluate_call_recursive_safeguard(self): \"\"\" Validate use case if internal", "side_effect=mock_lambda_call): # Call request that ends up calling the response formatter via 'evaluate_call'", "utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"localhost\"})) utils.check_val_equal(url, \"http://localhost:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test-server.com\"})) utils.check_val_equal(url, \"http://test-server.com:2001\") url", "HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(None, not_none=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, is_none=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True), HTTPBadRequest)", "url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"localhost\"})) utils.check_val_equal(url, \"http://localhost:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test-server.com\"})) utils.check_val_equal(url,", "a resulting HTTP response raises itself an error (because of implementation issue), while", "HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, is_none=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True), HTTPBadRequest)", "mock_raise(*_, **__): # avoid raising forever if the real safeguard fails doing its", "utils.check_raises(lambda: ax.verify_param(1, param_compare=\"1\", is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_equal=True), HTTPBadRequest) # when compare", "[URL: {}].\".format(url)) def test_magpie_prefix_request_with_multiple_route_url(self): \"\"\" Test multiple request routing with fixed \"MAGPIE_URL\" within", "raised 
errors. \"\"\" mock_calls = {\"counter\": 0} def mock_raise(*_, **__): # avoid raising", "settings url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"server\"})) # ignored, URL priority utils.check_val_equal(url, \"http://localhost:9871\") #", "should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", not_in=True), HTTPInternalServerError, msg=\"missing 'param_compare' for flag needing it", "None): with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\"}): url = utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:2001\") url =", "not_empty=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\", http_error=HTTPForbidden),", "container instance while value provided is not utils.check_raises(lambda: ax.verify_param(1, param_compare=1, is_in=True), HTTPInternalServerError) utils.check_raises(lambda:", "\"\"\" import os import unittest from distutils.version import LooseVersion import mock import six", "test_magpie_prefix_request_with_multiple_route_url(self): \"\"\" Test multiple request routing with fixed \"MAGPIE_URL\" within the API application.", "over envs url = utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:9871\") # env URL found if", "flags expecting a value (can only consider it bad request because comparison values", "not_in=True), HTTPInternalServerError) # strings cases handled correctly (no raise) utils.check_no_raise(lambda: ax.verify_param(\"1\", param_compare=\"1\", is_equal=True))", "other flags require some form of comparison between values. We evaluate these use", "\"\"\" Validate use case if internal function that handles formatting and generation of", "values. We evaluate these use cases here. .. 
seealso:: - :func:`test_verify_param_args_incorrect_usage` for invalid", "\"\"\" test_utils ---------------------------------- Tests for the various utility operations employed by Magpie. \"\"\"", "HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, not_in=True), HTTPInternalServerError) # strings cases handled correctly (no raise)", "HTTPInternalServerError) # compare flags expecting param_compare to be some container instance while value", "these use cases here. .. seealso:: - :func:`test_verify_param_args_incorrect_usage` for invalid input use-cases \"\"\"", "class to raise error should be caught\") utils.check_raises(lambda: ax.verify_param([1], param_compare=1, is_in=True), HTTPInternalServerError, msg=\"incorrect", "auto-resolved [URL: {}].\".format(url)) def test_magpie_prefix_request_with_multiple_route_url(self): \"\"\" Test multiple request routing with fixed \"MAGPIE_URL\"", "bad request because comparison values are valid) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=1, is_equal=True), HTTPBadRequest) utils.check_raises(lambda:", "is_false=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(None, not_none=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, is_none=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True),", "param_compare=list, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=1, not_in=True), HTTPInternalServerError)", "Validate use case if internal function that handles formatting and generation of a", "to format all response prior to return, the raised error will itself #", "have been auto-resolved [URL: {}].\".format(url)) def test_magpie_prefix_request_with_multiple_route_url(self): \"\"\" Test multiple request routing with", "We evaluate these use cases here. 
.. seealso:: - :func:`test_verify_param_args_incorrect_usage` for invalid input", "incorrect input utils.check_raises(lambda: ax.verify_param(1, param_compare=int, is_equal=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, is_equal=True), HTTPInternalServerError) #", "= asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v, False) resp = utils.mock_request(\"/some/path?Query=TRUE\") v = asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v,", "args[0], args[1:] utils.check_val_equal(request.url, base_url + _paths.pop(0), \"Proxied path should have been auto-resolved [URL:", "be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"b\"], not_in=True, http_error=HTTPOk), # noqa HTTPInternalServerError, msg=\"incorrect HTTP class", "# call 'raise_http' again each time operation fails, creating recursive raises. # If", "utils.check_val_equal(v, \"test\") resp = utils.mock_request(\"/some/path?query=value\") v = ar.get_query_param(resp, \"query\") utils.check_val_equal(v, \"value\") resp =", "utils.mock_request(\"/some/path\") v = ar.get_query_param(resp, \"value\") utils.check_val_equal(v, None) resp = utils.mock_request(\"/some/path?other=test\") v = ar.get_query_param(resp,", "not_empty=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\"), HTTPBadRequest) # with", "utils.check_val_equal(v, False) resp = utils.mock_request(\"/some/path?Query=TRUE\") v = asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v, True) def test_verify_param_proper_verifications_raised(self):", "ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], not_in=True) ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], is_in=True) ax.verify_param(1, param_compare=int, is_type=True) ax.verify_param(\"x\", param_compare=six.string_types,", 
"request creation should not raise: {}\".format(exc)) # if our counter reached higher than", "the final 'HTTPInternalServerError' that is # raised directly instead of usual TestResponse returned.", "during request creation should not raise: {}\".format(exc)) # if our counter reached higher", ") def test_guess_target_format_default(self): request = utils.mock_request() content_type, where = ag.guess_target_format(request) utils.check_val_equal(content_type, CONTENT_TYPE_JSON) utils.check_val_equal(where,", "= ar.get_query_param(resp, \"query\") utils.check_val_equal(v, \"value\") resp = utils.mock_request(\"/some/path?QUERY=VALUE\") v = ar.get_query_param(resp, \"query\") utils.check_val_equal(v,", "error should be caught\") utils.check_raises(lambda: ax.verify_param([1], param_compare=1, is_in=True), HTTPInternalServerError, msg=\"incorrect non-iterable compare should", "various utility operations employed by Magpie. \"\"\" import os import unittest from distutils.version", "``is_type`` where compare parameter must be the type directly. .. 
versionchanged:: 2.0 Since", "pyramid.settings import asbool from magpie import __meta__, constants from magpie.api import exception as", "mock_lambda_call(*_, **__): ax.evaluate_call(lambda: int(\"x\")) try: app = utils.get_test_magpie_app() with mock.patch(\"magpie.api.exception.generate_response_http_format\", side_effect=mock_raise): with mock.patch(\"magpie.api.login.login.get_session\",", "LooseVersion(self.version) < LooseVersion(\"0.10.0\"): # user name doesn't exist utils.check_response_basic_info(resp, expected_code=406, expected_method=\"POST\") else: #", "Invalid usage of function raises internal server error instead of 'normal HTTP error'.", "utils.mock_request(\"/some/path?QUERY=VALUE\") v = ar.get_query_param(resp, \"query\") utils.check_val_equal(v, \"VALUE\") resp = utils.mock_request(\"/some/path?QUERY=VALUE\") v = asbool(ar.get_query_param(resp,", "with requested error utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\",", "+ 1))) # noqa def test_format_content_json_str_invalid_usage(self): non_json_serializable_content = {\"key\": HTTPInternalServerError()} utils.check_raises( lambda: ax.format_content_json_str(200,", "ax.verify_param(1, param_compare=list, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=1, not_in=True),", "tests. 
# Always need to provide a settings container (even empty direct when", "still detect incorrect input utils.check_raises(lambda: ax.verify_param(1, param_compare=int, is_equal=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, is_equal=True),", "utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") # ignore port, URL has priority url =", "noqa HTTPInternalServerError, msg=\"invalid arguments resulting in error during response generation should raise\" )", "\"http://localhost:9871\") # URL fixed with missing scheme even if defined with mock.patch.dict(os.environ, {\"MAGPIE_URL\":", "_paths = [\"/signin\", \"/signin_internal\"] app = utils.get_test_magpie_app({\"magpie.url\": url}) with mock.patch(\"magpie.api.requests.get_value_multiformat_body_checked\", side_effect=lambda *_, **__:", "or whichever ``http_error`` provided) instead of runtime 'unexpected' processing error. 
On the other", "content_type, where = ag.guess_target_format(request) utils.check_val_equal(content_type, CONTENT_TYPE_JSON) utils.check_val_equal(where, True) def test_get_magpie_url_defined_or_defaults(self): # Disable constants", "HTTPInternalServerError, msg=\"invalid callable non-lambda 'call' should raise\") utils.check_raises(lambda: ax.evaluate_call(lambda: int, fallback=int), # noqa", "ax.verify_param(\"\", not_empty=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\"), HTTPBadRequest) #", "test_verify_param_compare_types(self): \"\"\" Arguments ``param`` and ``param_compare`` must be of same type for valid", "safeguard fails doing its job if mock_calls[\"counter\"] >= 2 * ax.RAISE_RECURSIVE_SAFEGUARD_MAX: return TypeError()", "expect a value but type is provided, should still detect incorrect input utils.check_raises(lambda:", "fails, creating recursive raises. 
# If recursive safeguard does its job, it should", "type should be caught\".format(flag)) def test_verify_param_compare_types(self): \"\"\" Arguments ``param`` and ``param_compare`` must be", "response generation should raise\" ) def test_guess_target_format_default(self): request = utils.mock_request() content_type, where =", "HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_equal=True), HTTPBadRequest) # when compare flags expect a value", "= utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"server\"})) # ignored, URL priority utils.check_val_equal(url, \"http://localhost:9871\") # URL fixed", "HTTPForbidden) def test_verify_param_proper_verifications_passed(self): ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], not_in=True) ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], is_in=True) ax.verify_param(1, param_compare=int,", "\"GET\", path) utils.check_response_basic_info(resp) utils.check_val_equal(resp.request.url, base_url + path, \"Proxied path should have been auto-resolved", "once, use cases did not really get tested utils.check_val_is_in(mock_calls[\"counter\"], list(range(2, ax.RAISE_RECURSIVE_SAFEGUARD_MAX + 1)))", "expect_errors=True) if LooseVersion(self.version) < LooseVersion(\"0.10.0\"): # user name doesn't exist utils.check_response_basic_info(resp, expected_code=406, expected_method=\"POST\")", "user name doesn't exist utils.check_response_basic_info(resp, expected_code=406, expected_method=\"POST\") else: # invalid username/password credentials utils.check_response_basic_info(resp,", "utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=1, is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=True, is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, param_compare=\"1\",", "__meta__.__version__ # only local test def test_magpie_prefix_direct_request(self): base_url = \"http://localhost\" for url in", "another on 
the redirected internal login _paths = [\"/signin\", \"/signin_internal\"] app = utils.get_test_magpie_app({\"magpie.url\":", "utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int, is_type=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1.0, param_compare=six.string_types, is_type=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\",", "[\"not_none\", \"not_empty\", \"not_in\", \"not_equal\", \"is_none\", \"is_empty\", \"is_in\", \"is_equal\", \"is_true\", \"is_false\", \"is_type\", \"matches\"]: utils.check_raises(lambda:", "msg=\"incorrect non-iterable compare should raise invalid type\") for flag in [\"not_none\", \"not_empty\", \"not_in\",", "from magpie import __meta__, constants from magpie.api import exception as ax from magpie.api", "HTTPInternalServerError, msg=\"incorrect non-iterable compare should raise invalid type\") for flag in [\"not_none\", \"not_empty\",", "utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\", not_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\", is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(False, is_true=True),", "http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\", not_equal=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\", is_equal=True, http_error=HTTPForbidden),", "# compare flags expecting a value (can only consider it bad request because", "= utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"localhost\"})) utils.check_val_equal(url, \"http://localhost:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test-server.com\"})) utils.check_val_equal(url, \"http://test-server.com:2001\")", "tweens are set up to format all response prior to return, the raised", "return real_func(request, *args, 
**kwargs) for url in [\"http://localhost\", \"http://localhost/magpie\"]: # paths are reduced", "\"1234\"})) utils.check_val_equal(url, \"http://test.com:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"1234\"})) utils.check_val_equal(url, \"http://localhost:1234\") url = utils.check_no_raise(lambda:", "\"query\")) utils.check_val_equal(v, True) def test_verify_param_proper_verifications_raised(self): # with default error utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"],", "OtherEnum(ExtendedEnum): VALUE1 = DummyEnum.VALUE1.value # copy internal string representation utils.check_val_not_equal(DummyEnum.VALUE1, OtherEnum.VALUE1, msg=\"concrete enum", "utils.test_request(app, \"GET\", \"/session\", expect_errors=True) except AssertionError: # Request called with above 'test_request' should", "should have been auto-resolved [URL: {}].\".format(url)) return real_func(request, *args, **kwargs) for url in", "not in settings url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"server\"})) # ignored, URL priority utils.check_val_equal(url,", "for invalid input use-cases \"\"\" # compare flags expecting a value (can only", "# If recursive safeguard does its job, it should end up raising 'HTTPInternalServerError'", "but type is provided, should still detect incorrect input utils.check_raises(lambda: ax.verify_param(1, param_compare=int, is_equal=True),", "HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(True, is_false=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(None, not_none=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, is_none=True), HTTPBadRequest)", "HTTPBadRequest, HTTPForbidden, HTTPInternalServerError, HTTPOk from pyramid.settings import asbool from magpie import __meta__, constants", "with mock.patch(\"magpie.api.requests.get_value_multiformat_body_checked\", side_effect=lambda *_, **__: mock_get_post(real_multiform_post_checked, *_, 
**__)): data = {\"user_name\": \"foo\", \"password\":", "the MAX value), stopping the endless loop. utils.test_request(app, \"GET\", \"/session\", expect_errors=True) except AssertionError:", "specified with incorrect type should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", not_in=True), HTTPInternalServerError, msg=\"missing 'param_compare'", "HTTPBadRequest) # when compare flags expect a value but type is provided, should", "resp = utils.mock_request(\"/some/path?other=test\") v = ar.get_query_param(resp, \"value\") utils.check_val_equal(v, None) resp = utils.mock_request(\"/some/path?other=test\") v", "Therefore, error to be raised is an 'expected' validation failure (``HTTPBadRequest`` or whichever", "each time operation fails, creating recursive raises. # If recursive safeguard does its", "should still detect incorrect input utils.check_raises(lambda: ax.verify_param(1, param_compare=int, is_equal=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str,", "the endless loop. utils.test_request(app, \"GET\", \"/session\", expect_errors=True) except AssertionError: # Request called with", "if our counter reached higher than the MAX (i.e.: 2*MAX from mock), the", "with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"localhost:9871\"}): url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") # settings", "'normal HTTP error'. \"\"\" utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"]), HTTPInternalServerError, msg=\"missing any flag specification", "= utils.mock_request(\"/some/path?Query=TRUE\") v = asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v, True) def test_verify_param_proper_verifications_raised(self): # with default", "it can pre-loaded from .env when running all tests. 
# Always need to", "is_true=True) ax.verify_param(False, is_false=True) ax.verify_param(1, not_none=True) ax.verify_param(None, is_none=True) ax.verify_param(\"abc\", not_empty=True) ax.verify_param(\"\", is_empty=True) ax.verify_param(\"abc\", matches=True,", "it is processing another pre-raised error, that it does not end up into", "= ar.get_query_param(resp, \"value\") utils.check_val_equal(v, None) resp = utils.mock_request(\"/some/path?other=test\") v = ar.get_query_param(resp, \"value\", True)", "the 'password' is called in 'login' module # this combination should happen twice,", "as ax from magpie.api import generic as ag from magpie.api import requests as", "\",\"]]: utils.check_val_equal(get_header(name, headers, split=split), CONTENT_TYPE_JSON) def test_get_query_param(self): resp = utils.mock_request(\"/some/path\") v = ar.get_query_param(resp,", "``http_error`` provided) instead of runtime 'unexpected' processing error. On the other hand, when", "# only local test def test_magpie_prefix_direct_request(self): base_url = \"http://localhost\" for url in [\"http://localhost\",", "'test_request' should catch the final 'HTTPInternalServerError' that is # raised directly instead of", "utils.check_raises(lambda: ax.verify_param(\"x\", **{flag: 1}), HTTPInternalServerError, msg=\"invalid flag '{}' type should be caught\".format(flag)) def", "is_type=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\", not_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\", is_equal=True), HTTPBadRequest) utils.check_raises(lambda:", "that values are compared accordingly in a controlled fashion. Therefore, error to be", "error instead of 'normal HTTP error'. 
\"\"\" utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"]), HTTPInternalServerError, msg=\"missing", "# Request called with above 'test_request' should catch the final 'HTTPInternalServerError' that is", "URL found if not in settings url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"server\"})) # ignored,", "credentials will call \"/signin\" followed by sub-request \"/signin_internal\" and finally \"ZigguratSignInBadAuth\". Both \"/signin\"", "path = \"/version\" resp = utils.test_request(app, \"GET\", path) utils.check_response_basic_info(resp) utils.check_val_equal(resp.request.url, base_url + path,", "use cases did not really get tested utils.check_val_is_in(mock_calls[\"counter\"], list(range(2, ax.RAISE_RECURSIVE_SAFEGUARD_MAX + 1))) #", "utils.check_raises(lambda: ax.verify_param(\"b\", not_in=True), HTTPInternalServerError, msg=\"missing 'param_compare' for flag needing it should be caught\")", "# Call request that ends up calling the response formatter via 'evaluate_call' itself", "\"magpie.scheme\": \"https\"})) utils.check_val_equal(url, \"https://localhost:9000\") with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"localhost:9871\"}): url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"}))", "resp = utils.mock_request(\"/some/path?value=test\") v = ar.get_query_param(resp, \"value\", True) utils.check_val_equal(v, \"test\") resp = utils.mock_request(\"/some/path?query=value\")", "get called at least more than once, use cases did not really get", "[\"content_type\", \"content-type\", \"Content_Type\", \"Content-Type\", \"CONTENT_TYPE\", \"CONTENT-TYPE\"]: for split in [\";,\", \",;\", \";\", (\",\",", "test_utils ---------------------------------- Tests for the various utility operations employed by Magpie. 
\"\"\" import", "``is_type`` is not requested, but other flags require some form of comparison between", "is not utils.check_raises(lambda: ax.verify_param(1, param_compare=\"x\", is_type=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_type=True), HTTPInternalServerError) utils.check_raises(lambda:", "cls.version = __meta__.__version__ # only local test def test_magpie_prefix_direct_request(self): base_url = \"http://localhost\" for", "'{}' type should be caught\".format(flag)) def test_verify_param_compare_types(self): \"\"\" Arguments ``param`` and ``param_compare`` must", "one in signin route and another on the redirected internal login _paths =", "ax.verify_param(\"abc\", not_empty=True) ax.verify_param(\"\", is_empty=True) ax.verify_param(\"abc\", matches=True, param_compare=r\"[a-z]+\") def test_verify_param_args_incorrect_usage(self): \"\"\" Invalid usage of", "lambda: ax.generate_response_http_format(None, {}, {}, \"\", {}), # noqa HTTPInternalServerError, msg=\"invalid arguments resulting in", "# copy internal string representation utils.check_val_not_equal(DummyEnum.VALUE1, OtherEnum.VALUE1, msg=\"concrete enum elements should be different\")", "within 'raise_http' function. 
# Since tweens are set up to format all response", "this combination should happen twice, one in signin route and another on the", "= [\"/signin\", \"/signin_internal\"] app = utils.get_test_magpie_app({\"magpie.url\": url}) with mock.patch(\"magpie.api.requests.get_value_multiformat_body_checked\", side_effect=lambda *_, **__: mock_get_post(real_multiform_post_checked,", "distutils.version import LooseVersion import mock import six from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPInternalServerError,", "Arguments ``param`` and ``param_compare`` must be of same type for valid comparison, except", "utils.test_request(app, \"POST\", _paths[0], json=data, headers=headers, expect_errors=True) if LooseVersion(self.version) < LooseVersion(\"0.10.0\"): # user name", "non_json_serializable_content = {\"key\": HTTPInternalServerError()} utils.check_raises( lambda: ax.format_content_json_str(200, \"\", non_json_serializable_content, CONTENT_TYPE_JSON), HTTPInternalServerError, msg=\"invalid content", "raise\") utils.check_raises(lambda: ax.evaluate_call(lambda: int, fallback=int), # noqa HTTPInternalServerError, msg=\"invalid callable non-lambda 'fallback' should", "envs url = utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:9871\") # env URL found if not", "CONTENT_TYPE_JSON) utils.check_val_equal(where, True) def test_get_magpie_url_defined_or_defaults(self): # Disable constants globals() for every case, since", "param_compare=[\"a\", \"b\"], is_in=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int, is_type=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\",", "= utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"1234\"})) utils.check_val_equal(url, \"http://localhost:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"9000\", \"magpie.scheme\": \"https\"}))", 
"is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=True, is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, param_compare=\"1\", is_equal=True), HTTPBadRequest) utils.check_raises(lambda:", "param_compare=[\"b\"], not_in=True, http_error=HTTPOk), # noqa HTTPInternalServerError, msg=\"incorrect HTTP class to raise error should", "HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\", not_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\", is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(False,", "ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], is_in=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int, is_type=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda:", "\"ZigguratSignInBadAuth\". Both \"/signin\" and \"ZigguratSignInBadAuth\" use \"get_multiformat_body\". 
\"\"\" from magpie.api.requests import get_value_multiformat_body_checked as", "= \"value-2\" @runner.MAGPIE_TEST_LOCAL @runner.MAGPIE_TEST_UTILS class TestUtils(unittest.TestCase): @classmethod def setUpClass(cls): cls.version = __meta__.__version__ #", "raise error should be caught\") utils.check_raises(lambda: ax.verify_param([1], param_compare=1, is_in=True), HTTPInternalServerError, msg=\"incorrect non-iterable compare", "ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=None), # noqa HTTPInternalServerError, msg=\"flag specified with incorrect type should", "param_compare=[\"a\", \"b\"], not_in=True) ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], is_in=True) ax.verify_param(1, param_compare=int, is_type=True) ax.verify_param(\"x\", param_compare=six.string_types, is_type=True)", "ax.verify_param(\"\", not_empty=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\",", "\"value\") utils.check_val_equal(v, None) resp = utils.mock_request(\"/some/path?other=test\") v = ar.get_query_param(resp, \"value\") utils.check_val_equal(v, None) resp", "into an endless recursive call stack of raised errors. 
\"\"\" mock_calls = {\"counter\":", "utils.check_raises(lambda: ax.verify_param(1, param_compare=list, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=1,", "headers = {\"Content-Type\": \"{}; charset=UTF-8\".format(CONTENT_TYPE_JSON)} for name in [\"content_type\", \"content-type\", \"Content_Type\", \"Content-Type\", \"CONTENT_TYPE\",", "expected_code=401, expected_method=\"POST\") def test_get_header_split(self): headers = {\"Content-Type\": \"{}; charset=UTF-8\".format(CONTENT_TYPE_JSON)} for name in [\"content_type\",", "http_error=HTTPOk), # noqa HTTPInternalServerError, msg=\"incorrect HTTP class to raise error should be caught\")", "\"\"\" Arguments ``param`` and ``param_compare`` must be of same type for valid comparison,", "utils.mock_request(\"/some/path?other=test\") v = ar.get_query_param(resp, \"value\") utils.check_val_equal(v, None) resp = utils.mock_request(\"/some/path?other=test\") v = ar.get_query_param(resp,", "if mock_calls[\"counter\"] >= 2 * ax.RAISE_RECURSIVE_SAFEGUARD_MAX: return TypeError() mock_calls[\"counter\"] += 1 raise TypeError()", "path should have been auto-resolved [URL: {}].\".format(url)) return real_func(request, *args, **kwargs) for url", "provided, should still detect incorrect input utils.check_raises(lambda: ax.verify_param(1, param_compare=int, is_equal=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\",", "real_func(*args, **kwargs) request, args = args[0], args[1:] utils.check_val_equal(request.url, base_url + _paths.pop(0), \"Proxied path", "app = utils.get_test_magpie_app({\"magpie.url\": url}) path = \"/version\" resp = utils.test_request(app, \"GET\", path) utils.check_response_basic_info(resp)", "consider it bad request because comparison values are valid) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=1, 
is_equal=True),", "coding: utf-8 -*- \"\"\" test_utils ---------------------------------- Tests for the various utility operations employed", "MAX value), stopping the endless loop. utils.test_request(app, \"GET\", \"/session\", expect_errors=True) except AssertionError: #", "ax.verify_param(False, is_true=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(True, is_false=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(None, not_none=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1,", "test app with mock.patch.object(constants, \"MAGPIE_URL\", None): with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\"}): url = utils.check_no_raise(lambda:", "utils.check_raises( lambda: ax.generate_response_http_format(None, {}, {}, \"\", {}), # noqa HTTPInternalServerError, msg=\"invalid arguments resulting", "ax.generate_response_http_format(None, {}, {}, \"\", {}), # noqa HTTPInternalServerError, msg=\"invalid arguments resulting in error", "# raised directly instead of usual TestResponse returned. That error is again re-raised", "for flag needing it should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"b\"], not_in=True, http_error=HTTPOk), #", "import unittest from distutils.version import LooseVersion import mock import six from pyramid.httpexceptions import", "class DummyEnum(ExtendedEnum): VALUE1 = \"value-1\" VALUE2 = \"value-2\" @runner.MAGPIE_TEST_LOCAL @runner.MAGPIE_TEST_UTILS class TestUtils(unittest.TestCase): @classmethod", "accordingly in a controlled fashion. 
Therefore, error to be raised is an 'expected'", "\"CONTENT-TYPE\"]: for split in [\";,\", \",;\", \";\", (\",\", \";\"), [\";\", \",\"]]: utils.check_val_equal(get_header(name, headers,", "raising forever if the real safeguard fails doing its job if mock_calls[\"counter\"] >=", "is_true=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(True, is_false=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(None, not_none=True, http_error=HTTPForbidden), HTTPForbidden)", "valid) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=1, is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=True, is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1,", "tests import runner, utils class DummyEnum(ExtendedEnum): VALUE1 = \"value-1\" VALUE2 = \"value-2\" @runner.MAGPIE_TEST_LOCAL", "use cases here. .. seealso:: - :func:`test_verify_param_args_incorrect_usage` for invalid input use-cases \"\"\" #", "to ensure that values are compared accordingly in a controlled fashion. 
Therefore, error", "\"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"localhost\"})) utils.check_val_equal(url, \"http://localhost:2001\") url = utils.check_no_raise(lambda:", "utils.check_val_equal(content_type, CONTENT_TYPE_JSON) utils.check_val_equal(where, True) def test_get_magpie_url_defined_or_defaults(self): # Disable constants globals() for every case,", "TestUtils(unittest.TestCase): @classmethod def setUpClass(cls): cls.version = __meta__.__version__ # only local test def test_magpie_prefix_direct_request(self):", "True) utils.check_val_equal(v, \"test\") resp = utils.mock_request(\"/some/path?query=value\") v = ar.get_query_param(resp, \"query\") utils.check_val_equal(v, \"value\") resp", "its job # if it did not get called at least more than", "is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(False, is_true=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(True, is_false=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(None, not_none=True),", "utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, is_equal=True), HTTPInternalServerError) # compare flags expecting param_compare to be a", "utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=True, is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, param_compare=\"1\", is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, param_compare=True,", "\"b\"], is_in=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int, is_type=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\",", "**kwargs) for url in [\"http://localhost\", \"http://localhost/magpie\"]: # paths are reduced (pop in mock)", "HTTPInternalServerError, msg=\"invalid arguments resulting 
in error during response generation should raise\" ) def", "ax.verify_param(\"x\", param_compare=\"x\", is_equal=True) ax.verify_param(True, is_true=True) ax.verify_param(False, is_false=True) ax.verify_param(1, not_none=True) ax.verify_param(None, is_none=True) ax.verify_param(\"abc\", not_empty=True)", "{}, \"\", {}), # noqa HTTPInternalServerError, msg=\"invalid arguments resulting in error during response", "= utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") # settings priority over envs url =", "= \"http://localhost\" def mock_get_post(real_func, *args, **kwargs): if args[1] != \"password\": return real_func(*args, **kwargs)", "{\"Content-Type\": CONTENT_TYPE_JSON, \"Accept\": CONTENT_TYPE_JSON} resp = utils.test_request(app, \"POST\", _paths[0], json=data, headers=headers, expect_errors=True) if", "\"\", non_json_serializable_content, CONTENT_TYPE_JSON), HTTPInternalServerError, msg=\"invalid content format expected as JSON serializable should raise\"", "failure (``HTTPBadRequest`` or whichever ``http_error`` provided) instead of runtime 'unexpected' processing error. 
On", "= __meta__.__version__ # only local test def test_magpie_prefix_direct_request(self): base_url = \"http://localhost\" for url", "CONTENT_TYPE_JSON, \"Accept\": CONTENT_TYPE_JSON} resp = utils.test_request(app, \"POST\", _paths[0], json=data, headers=headers, expect_errors=True) if LooseVersion(self.version)", "and ``param_compare`` must be of same type for valid comparison, except for ``is_type``", "HTTPForbidden) utils.check_raises(lambda: ax.verify_param(1, is_none=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\",", "HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=1, not_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1,", "is_type=True) ax.verify_param(\"x\", param_compare=\"y\", not_equal=True) ax.verify_param(\"x\", param_compare=\"x\", is_equal=True) ax.verify_param(True, is_true=True) ax.verify_param(False, is_false=True) ax.verify_param(1, not_none=True)", "up calling the response formatter via 'evaluate_call' itself raising to # trigger 'mock_raise'", "# if it did not get called at least more than once, use", "endless recursive call stack of raised errors. 
\"\"\" mock_calls = {\"counter\": 0} def", "def test_generate_response_http_format_invalid_usage(self): utils.check_raises( lambda: ax.generate_response_http_format(None, {}, {}, \"\", {}), # noqa HTTPInternalServerError, msg=\"invalid", "than once, use cases did not really get tested utils.check_val_is_in(mock_calls[\"counter\"], list(range(2, ax.RAISE_RECURSIVE_SAFEGUARD_MAX +", "class TestUtils(unittest.TestCase): @classmethod def setUpClass(cls): cls.version = __meta__.__version__ # only local test def", "utils.check_raises(lambda: ax.verify_param(1, is_none=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True), HTTPBadRequest) utils.check_raises(lambda:", "type. Inversely, ``param_compare`` must not be a type if ``is_type`` is not requested,", "[\"/signin\", \"/signin_internal\"] app = utils.get_test_magpie_app({\"magpie.url\": url}) with mock.patch(\"magpie.api.requests.get_value_multiformat_body_checked\", side_effect=lambda *_, **__: mock_get_post(real_multiform_post_checked, *_,", "'unexpected' processing error. On the other hand, when ``is_type`` flag is requested, we", "HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\", not_equal=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\", is_equal=True, http_error=HTTPForbidden), HTTPForbidden)", "provided) instead of runtime 'unexpected' processing error. 
On the other hand, when ``is_type``", "def mock_lambda_call(*_, **__): ax.evaluate_call(lambda: int(\"x\")) try: app = utils.get_test_magpie_app() with mock.patch(\"magpie.api.exception.generate_response_http_format\", side_effect=mock_raise): with", "not_in=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], is_in=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int, is_type=True), HTTPBadRequest)", "with default error utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\", \"b\"],", "get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") url = utils.check_no_raise(lambda:", "= ar.get_query_param(resp, \"query\") utils.check_val_equal(v, \"VALUE\") resp = utils.mock_request(\"/some/path?QUERY=VALUE\") v = asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v,", "in [\"content_type\", \"content-type\", \"Content_Type\", \"Content-Type\", \"CONTENT_TYPE\", \"CONTENT-TYPE\"]: for split in [\";,\", \",;\", \";\",", "is_equal=True), HTTPInternalServerError) # compare flags expecting param_compare to be a type while value", "url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"9000\", \"magpie.scheme\": \"https\"})) utils.check_val_equal(url, \"https://localhost:9000\") with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"localhost:9871\"}):", "from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url from tests import runner, utils class", "followed by sub-request \"/signin_internal\" and finally \"ZigguratSignInBadAuth\". 
Both \"/signin\" and \"ZigguratSignInBadAuth\" use \"get_multiformat_body\".", "as 'AssertionError' pass except Exception as exc: self.fail(\"unexpected error during request creation should", "is called in 'login' module # this combination should happen twice, one in", "url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") # ignore port, URL has priority", "# compare flags expecting param_compare to be a type while value provided is", "for every case, since it can pre-loaded from .env when running all tests.", "MAX (i.e.: 2*MAX from mock), the safeguard did not do its job #", "mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"localhost:9871\"}): url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") # settings priority", "= DummyEnum.VALUE1.value # copy internal string representation utils.check_val_not_equal(DummyEnum.VALUE1, OtherEnum.VALUE1, msg=\"concrete enum elements should", "v = ar.get_query_param(resp, \"query\") utils.check_val_equal(v, \"VALUE\") resp = utils.mock_request(\"/some/path?QUERY=VALUE\") v = asbool(ar.get_query_param(resp, \"query\"))", "re-raised as 'AssertionError' pass except Exception as exc: self.fail(\"unexpected error during request creation", "get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:9871\") # env URL found if not in settings url =", "utils.get_test_magpie_app() with mock.patch(\"magpie.api.exception.generate_response_http_format\", side_effect=mock_raise): with mock.patch(\"magpie.api.login.login.get_session\", side_effect=mock_lambda_call): # Call request that ends up", "job # if it did not get called at least more than once,", "time a post to get the 'password' is called in 'login' module #", "user input, we should **NOT** raise ``HTTPInternalServerError`` because the whole point of the", "param_compare=\"x\", 
is_type=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_type=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=None, is_type=True), HTTPInternalServerError)", "any flag specification should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=None), # noqa", "param_compare=[\"a\", \"b\"], not_in=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], is_in=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda:", "in settings), # otherwise 'get_constant' can find the current thread settings generated by", "requested, we know that ``param_compare`` must be a type. Inversely, ``param_compare`` must not", "is_equal=True) ax.verify_param(True, is_true=True) ax.verify_param(False, is_false=True) ax.verify_param(1, not_none=True) ax.verify_param(None, is_none=True) ax.verify_param(\"abc\", not_empty=True) ax.verify_param(\"\", is_empty=True)", "raised directly instead of usual TestResponse returned. That error is again re-raised as", "ax.verify_param(1, param_compare=True, is_equal=True), HTTPBadRequest) # when compare flags expect a value but type", "finally \"ZigguratSignInBadAuth\". Both \"/signin\" and \"ZigguratSignInBadAuth\" use \"get_multiformat_body\". \"\"\" from magpie.api.requests import get_value_multiformat_body_checked", "utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_type=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=None, is_type=True), HTTPInternalServerError) # compare flags", "been auto-resolved [URL: {}].\".format(url)) return real_func(request, *args, **kwargs) for url in [\"http://localhost\", \"http://localhost/magpie\"]:", "raises internal server error instead of 'normal HTTP error'. 
\"\"\" utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\",", "charset=UTF-8\".format(CONTENT_TYPE_JSON)} for name in [\"content_type\", \"content-type\", \"Content_Type\", \"Content-Type\", \"CONTENT_TYPE\", \"CONTENT-TYPE\"]: for split in", "ar.get_query_param(resp, \"value\", True) utils.check_val_equal(v, True) resp = utils.mock_request(\"/some/path?value=test\") v = ar.get_query_param(resp, \"value\", True)", "compared accordingly in a controlled fashion. Therefore, error to be raised is an", "creation should not raise: {}\".format(exc)) # if our counter reached higher than the", "app = utils.get_test_magpie_app({\"magpie.url\": url}) with mock.patch(\"magpie.api.requests.get_value_multiformat_body_checked\", side_effect=lambda *_, **__: mock_get_post(real_multiform_post_checked, *_, **__)): data", "ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], is_in=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int, is_type=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1.0, param_compare=six.string_types,", "from distutils.version import LooseVersion import mock import six from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden,", "\"https://test-server.com\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"localhost\"})) utils.check_val_equal(url, \"http://localhost:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test-server.com\"}))", "utils.check_val_equal(url, \"http://localhost:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\":", "thread settings generated by any test app with mock.patch.object(constants, \"MAGPIE_URL\", None): with mock.patch.dict(os.environ,", "not raise: {}\".format(exc)) # if our counter reached higher than the MAX (i.e.:", 
"is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=list, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, is_in=True), HTTPInternalServerError) utils.check_raises(lambda:", "utils.check_val_equal(v, \"value\") resp = utils.mock_request(\"/some/path?QUERY=VALUE\") v = ar.get_query_param(resp, \"query\") utils.check_val_equal(v, \"VALUE\") resp =", "not_equal=True) ax.verify_param(\"x\", param_compare=\"x\", is_equal=True) ax.verify_param(True, is_true=True) ax.verify_param(False, is_false=True) ax.verify_param(1, not_none=True) ax.verify_param(None, is_none=True) ax.verify_param(\"abc\",", "again re-raised as 'AssertionError' pass except Exception as exc: self.fail(\"unexpected error during request", "as exc: self.fail(\"unexpected error during request creation should not raise: {}\".format(exc)) # if", "except for ``is_type`` where compare parameter must be the type directly. .. versionchanged::", "ax.verify_param(\"x\", param_compare=six.string_types, is_type=True) ax.verify_param(\"x\", param_compare=str, is_type=True) ax.verify_param(\"x\", param_compare=\"y\", not_equal=True) ax.verify_param(\"x\", param_compare=\"x\", is_equal=True) ax.verify_param(True,", "param_compare=str, is_type=True) ax.verify_param(\"x\", param_compare=\"y\", not_equal=True) ax.verify_param(\"x\", param_compare=\"x\", is_equal=True) ax.verify_param(True, is_true=True) ax.verify_param(False, is_false=True) ax.verify_param(1,", "compare flags expecting param_compare to be some container instance while value provided is", "HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=1, not_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=list, not_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\",", "utils.check_raises(lambda: ax.verify_param(1, param_compare=1, is_in=True), 
HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=list, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str,", "\"test\") resp = utils.mock_request(\"/some/path?query=value\") v = ar.get_query_param(resp, \"query\") utils.check_val_equal(v, \"value\") resp = utils.mock_request(\"/some/path?QUERY=VALUE\")", "ax.verify_param(None, is_none=True) ax.verify_param(\"abc\", not_empty=True) ax.verify_param(\"\", is_empty=True) ax.verify_param(\"abc\", matches=True, param_compare=r\"[a-z]+\") def test_verify_param_args_incorrect_usage(self): \"\"\" Invalid", "\"query\") utils.check_val_equal(v, \"VALUE\") resp = utils.mock_request(\"/some/path?QUERY=VALUE\") v = asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v, False) resp", "Since ``param`` can come from user input, we should **NOT** raise ``HTTPInternalServerError`` because", "versionchanged:: 2.0 Since ``param`` can come from user input, we should **NOT** raise", "ax.verify_param(\"1\", param_compare=1, is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=True, is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, param_compare=\"1\", is_equal=True),", "ar.get_query_param(resp, \"value\") utils.check_val_equal(v, None) resp = utils.mock_request(\"/some/path?other=test\") v = ar.get_query_param(resp, \"value\", True) utils.check_val_equal(v,", "during response generation should raise\" ) def test_guess_target_format_default(self): request = utils.mock_request() content_type, where", "not_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=list, not_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, not_in=True), HTTPInternalServerError) #", "# with requested error utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=True, 
http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\",", "def test_guess_target_format_default(self): request = utils.mock_request() content_type, where = ag.guess_target_format(request) utils.check_val_equal(content_type, CONTENT_TYPE_JSON) utils.check_val_equal(where, True)", "with mock.patch(\"magpie.api.login.login.get_session\", side_effect=mock_lambda_call): # Call request that ends up calling the response formatter", "def test_evaluate_call_recursive_safeguard(self): \"\"\" Validate use case if internal function that handles formatting and", "cases did not really get tested utils.check_val_is_in(mock_calls[\"counter\"], list(range(2, ax.RAISE_RECURSIVE_SAFEGUARD_MAX + 1))) # noqa", "a value but type is provided, should still detect incorrect input utils.check_raises(lambda: ax.verify_param(1,", "list(range(2, ax.RAISE_RECURSIVE_SAFEGUARD_MAX + 1))) # noqa def test_format_content_json_str_invalid_usage(self): non_json_serializable_content = {\"key\": HTTPInternalServerError()} utils.check_raises(", "\"not_empty\", \"not_in\", \"not_equal\", \"is_none\", \"is_empty\", \"is_in\", \"is_equal\", \"is_true\", \"is_false\", \"is_type\", \"matches\"]: utils.check_raises(lambda: ax.verify_param(\"x\",", "exception as ax from magpie.api import generic as ag from magpie.api import requests", "API application. 
Signin with invalid credentials will call \"/signin\" followed by sub-request \"/signin_internal\"", "(i.e.: 2*MAX from mock), the safeguard did not do its job # if", "magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url from tests import runner, utils class DummyEnum(ExtendedEnum):", "= \"http://localhost\" for url in [\"http://localhost\", \"http://localhost/magpie\"]: app = utils.get_test_magpie_app({\"magpie.url\": url}) path =", "globals() for every case, since it can pre-loaded from .env when running all", "not do its job # if it did not get called at least", "= utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test-server.com\"})) utils.check_val_equal(url, \"http://test-server.com:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test.com\", \"magpie.port\": \"1234\"}))", "within the API application. Signin with invalid credentials will call \"/signin\" followed by", "doesn't exist utils.check_response_basic_info(resp, expected_code=406, expected_method=\"POST\") else: # invalid username/password credentials utils.check_response_basic_info(resp, expected_code=401, expected_method=\"POST\")", "param_compare=\"x\", not_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\", is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(False, is_true=True), HTTPBadRequest) utils.check_raises(lambda:", "2*MAX from mock), the safeguard did not do its job # if it", "really get tested utils.check_val_is_in(mock_calls[\"counter\"], list(range(2, ax.RAISE_RECURSIVE_SAFEGUARD_MAX + 1))) # noqa def test_format_content_json_str_invalid_usage(self): non_json_serializable_content", "\"value-1\" VALUE2 = \"value-2\" @runner.MAGPIE_TEST_LOCAL @runner.MAGPIE_TEST_UTILS class TestUtils(unittest.TestCase): @classmethod def setUpClass(cls): cls.version =", "def mock_get_post(real_func, *args, **kwargs): if args[1] != \"password\": return 
real_func(*args, **kwargs) request, args", "that ends up calling the response formatter via 'evaluate_call' itself raising to #", "type is provided, should still detect incorrect input utils.check_raises(lambda: ax.verify_param(1, param_compare=int, is_equal=True), HTTPInternalServerError)", "type\") for flag in [\"not_none\", \"not_empty\", \"not_in\", \"not_equal\", \"is_none\", \"is_empty\", \"is_in\", \"is_equal\", \"is_true\",", "resp = utils.mock_request(\"/some/path?QUERY=VALUE\") v = ar.get_query_param(resp, \"query\") utils.check_val_equal(v, \"VALUE\") resp = utils.mock_request(\"/some/path?QUERY=VALUE\") v", "every case, since it can pre-loaded from .env when running all tests. #", "HTTPInternalServerError) # compare flags expecting param_compare to be a type while value provided", "param_compare=[\"a\", \"b\"], not_in=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], is_in=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int,", "the method is to ensure that values are compared accordingly in a controlled", "response raises itself an error (because of implementation issue), while it is processing", "the MAX (i.e.: 2*MAX from mock), the safeguard did not do its job", "= {\"key\": HTTPInternalServerError()} utils.check_raises( lambda: ax.format_content_json_str(200, \"\", non_json_serializable_content, CONTENT_TYPE_JSON), HTTPInternalServerError, msg=\"invalid content format", "ag from magpie.api import requests as ar from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header,", "asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v, False) resp = utils.mock_request(\"/some/path?Query=TRUE\") v = asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v, True)", "requests as ar from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url from tests import", "\"value-2\"], any_order=True) def 
test_enum_get_by_value(self): utils.check_val_equal(DummyEnum.get(\"value-1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"VALUE1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"random\"), None) utils.check_val_equal(DummyEnum.get(\"random\", \"something\"), \"something\")", "msg=\"flag specified with incorrect type should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", not_in=True), HTTPInternalServerError, msg=\"missing", "python # -*- coding: utf-8 -*- \"\"\" test_utils ---------------------------------- Tests for the various", "HTTPInternalServerError, HTTPOk from pyramid.settings import asbool from magpie import __meta__, constants from magpie.api", "case, since it can pre-loaded from .env when running all tests. # Always", "instead of usual TestResponse returned. That error is again re-raised as 'AssertionError' pass", "priority utils.check_val_equal(url, \"http://localhost:9871\") # URL fixed with missing scheme even if defined with", "\"b\"], is_in=True) ax.verify_param(1, param_compare=int, is_type=True) ax.verify_param(\"x\", param_compare=six.string_types, is_type=True) ax.verify_param(\"x\", param_compare=str, is_type=True) ax.verify_param(\"x\", param_compare=\"y\",", "should raise\" ) def test_generate_response_http_format_invalid_usage(self): utils.check_raises( lambda: ax.generate_response_http_format(None, {}, {}, \"\", {}), #", "ax.verify_param(\"abc\", is_empty=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\", http_error=HTTPForbidden), HTTPForbidden) def test_verify_param_proper_verifications_passed(self): ax.verify_param(\"x\",", "utils.check_raises(lambda: ax.verify_param(None, not_none=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, is_none=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True), HTTPBadRequest) utils.check_raises(lambda:", "*_, **__)): data = 
{\"user_name\": \"foo\", \"password\": \"<PASSWORD>\"} headers = {\"Content-Type\": CONTENT_TYPE_JSON, \"Accept\":", "*args, **kwargs) for url in [\"http://localhost\", \"http://localhost/magpie\"]: # paths are reduced (pop in", "# otherwise 'get_constant' can find the current thread settings generated by any test", "as ar from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url from tests import runner,", "that ``param_compare`` must be a type. Inversely, ``param_compare`` must not be a type", "raise TypeError() def mock_lambda_call(*_, **__): ax.evaluate_call(lambda: int(\"x\")) try: app = utils.get_test_magpie_app() with mock.patch(\"magpie.api.exception.generate_response_http_format\",", "scheme even if defined with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\", \"MAGPIE_PORT\": \"1234\"}): url = utils.check_no_raise(lambda:", "value (can only consider it bad request because comparison values are valid) utils.check_raises(lambda:", "value), stopping the endless loop. 
utils.test_request(app, \"GET\", \"/session\", expect_errors=True) except AssertionError: # Request", "try: app = utils.get_test_magpie_app() with mock.patch(\"magpie.api.exception.generate_response_http_format\", side_effect=mock_raise): with mock.patch(\"magpie.api.login.login.get_session\", side_effect=mock_lambda_call): # Call request", "HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\"),", "[\"http://localhost\", \"http://localhost/magpie\"]: # paths are reduced (pop in mock) each time a post", "seealso:: - :func:`test_verify_param_args_incorrect_usage` for invalid input use-cases \"\"\" # compare flags expecting a", "calling the response formatter via 'evaluate_call' itself raising to # trigger 'mock_raise' recursively", "except Exception as exc: self.fail(\"unexpected error during request creation should not raise: {}\".format(exc))", "HTTPInternalServerError, msg=\"incorrect HTTP class to raise error should be caught\") utils.check_raises(lambda: ax.verify_param([1], param_compare=1,", "HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int, is_type=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\", not_equal=True, http_error=HTTPForbidden), HTTPForbidden)", "form of comparison between values. We evaluate these use cases here. .. 
seealso::", "\"https\"})) utils.check_val_equal(url, \"https://localhost:9000\") with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"localhost:9871\"}): url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url,", "CONTENT_TYPE_JSON), HTTPInternalServerError, msg=\"invalid content format expected as JSON serializable should raise\" ) def", "\"get_multiformat_body\". \"\"\" from magpie.api.requests import get_value_multiformat_body_checked as real_multiform_post_checked base_url = \"http://localhost\" def mock_get_post(real_func,", "point of the method is to ensure that values are compared accordingly in", "magpie.api.requests import get_value_multiformat_body_checked as real_multiform_post_checked base_url = \"http://localhost\" def mock_get_post(real_func, *args, **kwargs): if", "username/password credentials utils.check_response_basic_info(resp, expected_code=401, expected_method=\"POST\") def test_get_header_split(self): headers = {\"Content-Type\": \"{}; charset=UTF-8\".format(CONTENT_TYPE_JSON)} for", "= {\"counter\": 0} def mock_raise(*_, **__): # avoid raising forever if the real", "matches=True, param_compare=r\"[a-z]+\") def test_verify_param_args_incorrect_usage(self): \"\"\" Invalid usage of function raises internal server error", "get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"localhost\"})) utils.check_val_equal(url, \"http://localhost:2001\") url =", "utils.test_request(app, \"GET\", path) utils.check_response_basic_info(resp) utils.check_val_equal(resp.request.url, base_url + path, \"Proxied path should have been", "split=split), CONTENT_TYPE_JSON) def test_get_query_param(self): resp = utils.mock_request(\"/some/path\") v = ar.get_query_param(resp, \"value\") utils.check_val_equal(v, None)", "param_compare=1, is_in=True), 
HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=list, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, is_in=True), HTTPInternalServerError)", "(even empty direct when nothing define in settings), # otherwise 'get_constant' can find", "\"server\"})) # ignored, URL priority utils.check_val_equal(url, \"http://localhost:9871\") # URL fixed with missing scheme", ".. seealso:: - :func:`test_verify_param_args_incorrect_usage` for invalid input use-cases \"\"\" # compare flags expecting", "v = ar.get_query_param(resp, \"value\", True) utils.check_val_equal(v, \"test\") resp = utils.mock_request(\"/some/path?query=value\") v = ar.get_query_param(resp,", "matches=True, param_compare=r\"[A-Z]+\"), HTTPBadRequest) # with requested error utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=True, http_error=HTTPForbidden),", "ax.verify_param(1.0, param_compare=six.string_types, is_type=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\", not_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\", is_equal=True),", "= ag.guess_target_format(request) utils.check_val_equal(content_type, CONTENT_TYPE_JSON) utils.check_val_equal(where, True) def test_get_magpie_url_defined_or_defaults(self): # Disable constants globals() for", "utils.check_val_not_equal(DummyEnum.VALUE1, OtherEnum.VALUE1, msg=\"concrete enum elements should be different\") def test_evaluate_call_callable_incorrect_usage(self): \"\"\" Verifies that", "1 raise TypeError() def mock_lambda_call(*_, **__): ax.evaluate_call(lambda: int(\"x\")) try: app = utils.get_test_magpie_app() with", "that incorrect usage of utility is raised accordingly. 
\"\"\" utils.check_raises(lambda: ax.evaluate_call(int), HTTPInternalServerError, msg=\"invalid", "is_equal=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(False, is_true=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(True, is_false=True, http_error=HTTPForbidden), HTTPForbidden)", "not_equal=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\", is_equal=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(False, is_true=True, http_error=HTTPForbidden),", "\"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") # ignore port, URL has priority url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\":", "{\"MAGPIE_URL\": \"\"}): url = utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"}))", "utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"server\"})) # ignored, URL priority utils.check_val_equal(url, \"http://localhost:9871\") # URL fixed with", "the real safeguard fails doing its job if mock_calls[\"counter\"] >= 2 * ax.RAISE_RECURSIVE_SAFEGUARD_MAX:", "= ar.get_query_param(resp, \"value\", True) utils.check_val_equal(v, True) resp = utils.mock_request(\"/some/path?value=test\") v = ar.get_query_param(resp, \"value\",", "format all response prior to return, the raised error will itself # call", "compare flags expecting a value (can only consider it bad request because comparison", "utils.check_val_equal(DummyEnum.get(\"value-1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"VALUE1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"random\"), None) utils.check_val_equal(DummyEnum.get(\"random\", \"something\"), \"something\") def test_enum_other(self): class 
OtherEnum(ExtendedEnum):", "get_header, get_magpie_url from tests import runner, utils class DummyEnum(ExtendedEnum): VALUE1 = \"value-1\" VALUE2", "recursively within 'raise_http' function. # Since tweens are set up to format all", "# env URL found if not in settings url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"server\"}))", "data = {\"user_name\": \"foo\", \"password\": \"<PASSWORD>\"} headers = {\"Content-Type\": CONTENT_TYPE_JSON, \"Accept\": CONTENT_TYPE_JSON} resp", "\"MAGPIE_URL\", None): with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\"}): url = utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:2001\") url", "get_magpie_url({\"magpie.host\": \"localhost\"})) utils.check_val_equal(url, \"http://localhost:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test-server.com\"})) utils.check_val_equal(url, \"http://test-server.com:2001\") url =", "is_equal=True)) def test_enum_values_listing(self): utils.check_all_equal(DummyEnum.values(), [\"value-1\", \"value-2\"], any_order=True) def test_enum_get_by_value(self): utils.check_val_equal(DummyEnum.get(\"value-1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"VALUE1\"), DummyEnum.VALUE1)", "\"MAGPIE_URL\" within the API application. 
Signin with invalid credentials will call \"/signin\" followed", "+= 1 raise TypeError() def mock_lambda_call(*_, **__): ax.evaluate_call(lambda: int(\"x\")) try: app = utils.get_test_magpie_app()", "utils.check_raises(lambda: ax.verify_param([1], param_compare=1, is_in=True), HTTPInternalServerError, msg=\"incorrect non-iterable compare should raise invalid type\") for", "get_magpie_url({\"magpie.host\": \"test.com\", \"magpie.port\": \"1234\"})) utils.check_val_equal(url, \"http://test.com:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"1234\"})) utils.check_val_equal(url, \"http://localhost:1234\")", "six from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPInternalServerError, HTTPOk from pyramid.settings import asbool from", "\"password\": \"<PASSWORD>\"} headers = {\"Content-Type\": CONTENT_TYPE_JSON, \"Accept\": CONTENT_TYPE_JSON} resp = utils.test_request(app, \"POST\", _paths[0],", "\"http://localhost\" for url in [\"http://localhost\", \"http://localhost/magpie\"]: app = utils.get_test_magpie_app({\"magpie.url\": url}) path = \"/version\"", "ensure that values are compared accordingly in a controlled fashion. 
Therefore, error to", "utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\", is_equal=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(False, is_true=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(True,", "requested error utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\", \"b\"],", "resp = utils.mock_request(\"/some/path?other=test\") v = ar.get_query_param(resp, \"value\", True) utils.check_val_equal(v, True) resp = utils.mock_request(\"/some/path?value=test\")", "HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], is_in=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int, is_type=True, http_error=HTTPForbidden),", "priority over envs url = utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:9871\") # env URL found", "expected_method=\"POST\") def test_get_header_split(self): headers = {\"Content-Type\": \"{}; charset=UTF-8\".format(CONTENT_TYPE_JSON)} for name in [\"content_type\", \"content-type\",", "param_compare=int, is_type=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\", not_equal=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\",", "'raise_http' function. 
# Since tweens are set up to format all response prior", "content format expected as JSON serializable should raise\" ) def test_generate_response_http_format_invalid_usage(self): utils.check_raises( lambda:", "param_compare=str, not_in=True), HTTPInternalServerError) # strings cases handled correctly (no raise) utils.check_no_raise(lambda: ax.verify_param(\"1\", param_compare=\"1\",", "# -*- coding: utf-8 -*- \"\"\" test_utils ---------------------------------- Tests for the various utility", "did not do its job # if it did not get called at", "= utils.test_request(app, \"GET\", path) utils.check_response_basic_info(resp) utils.check_val_equal(resp.request.url, base_url + path, \"Proxied path should have", "HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_type=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=None, is_type=True), HTTPInternalServerError) # compare", "\"POST\", _paths[0], json=data, headers=headers, expect_errors=True) if LooseVersion(self.version) < LooseVersion(\"0.10.0\"): # user name doesn't", "[\"http://localhost\", \"http://localhost/magpie\"]: app = utils.get_test_magpie_app({\"magpie.url\": url}) path = \"/version\" resp = utils.test_request(app, \"GET\",", "utils class DummyEnum(ExtendedEnum): VALUE1 = \"value-1\" VALUE2 = \"value-2\" @runner.MAGPIE_TEST_LOCAL @runner.MAGPIE_TEST_UTILS class TestUtils(unittest.TestCase):", "utils.check_raises(lambda: ax.verify_param(False, is_true=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(True, is_false=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(None, not_none=True), HTTPBadRequest) utils.check_raises(lambda:", "headers = {\"Content-Type\": CONTENT_TYPE_JSON, \"Accept\": CONTENT_TYPE_JSON} resp = utils.test_request(app, \"POST\", _paths[0], json=data, headers=headers,", "\"not_equal\", \"is_none\", \"is_empty\", \"is_in\", \"is_equal\", \"is_true\", \"is_false\", 
\"is_type\", \"matches\"]: utils.check_raises(lambda: ax.verify_param(\"x\", **{flag: 1}),", "not_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\", is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(False, is_true=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(True,", "\"https://test-server.com\") # settings priority over envs url = utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:9871\") #", "must be a type. Inversely, ``param_compare`` must not be a type if ``is_type``", "---------------------------------- Tests for the various utility operations employed by Magpie. \"\"\" import os", "instead of 'normal HTTP error'. \"\"\" utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"]), HTTPInternalServerError, msg=\"missing any", "fixed with missing scheme even if defined with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\", \"MAGPIE_PORT\": \"1234\"}):", "some container instance while value provided is not utils.check_raises(lambda: ax.verify_param(1, param_compare=1, is_in=True), HTTPInternalServerError)", "\"\"}): url = utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url,", "values are valid) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=1, is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=True, is_equal=True), HTTPBadRequest)", "ax.verify_param(1, not_none=True) ax.verify_param(None, is_none=True) ax.verify_param(\"abc\", not_empty=True) ax.verify_param(\"\", is_empty=True) ax.verify_param(\"abc\", matches=True, param_compare=r\"[a-z]+\") def test_verify_param_args_incorrect_usage(self):", "valid comparison, except for ``is_type`` where compare parameter must be the 
type directly.", "param_compare=\"y\", is_equal=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(False, is_true=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(True, is_false=True, http_error=HTTPForbidden),", "msg=\"concrete enum elements should be different\") def test_evaluate_call_callable_incorrect_usage(self): \"\"\" Verifies that incorrect usage", "flag is requested, we know that ``param_compare`` must be a type. Inversely, ``param_compare``", "URL fixed with missing scheme even if defined with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\", \"MAGPIE_PORT\":", "string representation utils.check_val_not_equal(DummyEnum.VALUE1, OtherEnum.VALUE1, msg=\"concrete enum elements should be different\") def test_evaluate_call_callable_incorrect_usage(self): \"\"\"", "empty direct when nothing define in settings), # otherwise 'get_constant' can find the", "specification should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=None), # noqa HTTPInternalServerError, msg=\"flag", "utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"b\"], not_in=True, http_error=HTTPOk), # noqa HTTPInternalServerError, msg=\"incorrect HTTP class to raise", "employed by Magpie. 
\"\"\" import os import unittest from distutils.version import LooseVersion import", "param_compare=\"1\", is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_equal=True), HTTPBadRequest) # when compare flags expect", "should have been auto-resolved [URL: {}].\".format(url)) def test_magpie_prefix_request_with_multiple_route_url(self): \"\"\" Test multiple request routing", "raises itself an error (because of implementation issue), while it is processing another", "value but type is provided, should still detect incorrect input utils.check_raises(lambda: ax.verify_param(1, param_compare=int,", "param_compare=\"y\", is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(False, is_true=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(True, is_false=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(None,", "JSON serializable should raise\" ) def test_generate_response_http_format_invalid_usage(self): utils.check_raises( lambda: ax.generate_response_http_format(None, {}, {}, \"\",", "error, that it does not end up into an endless recursive call stack", "def test_get_query_param(self): resp = utils.mock_request(\"/some/path\") v = ar.get_query_param(resp, \"value\") utils.check_val_equal(v, None) resp =", "utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"9000\", \"magpie.scheme\": \"https\"})) utils.check_val_equal(url, \"https://localhost:9000\") with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"localhost:9871\"}): url =", "param_compare=str, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=1, not_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=list, not_in=True), HTTPInternalServerError)", "happen twice, one in signin route and another on the redirected internal login", "OtherEnum.VALUE1, msg=\"concrete enum elements should be different\") def 
test_evaluate_call_callable_incorrect_usage(self): \"\"\" Verifies that incorrect", "ax.verify_param(1, param_compare=\"x\", is_type=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_type=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=None, is_type=True),", "\"value\", True) utils.check_val_equal(v, \"test\") resp = utils.mock_request(\"/some/path?query=value\") v = ar.get_query_param(resp, \"query\") utils.check_val_equal(v, \"value\")", "'password' is called in 'login' module # this combination should happen twice, one", "utils.check_raises( lambda: ax.format_content_json_str(200, \"\", non_json_serializable_content, CONTENT_TYPE_JSON), HTTPInternalServerError, msg=\"invalid content format expected as JSON", "ax.verify_param(False, is_false=True) ax.verify_param(1, not_none=True) ax.verify_param(None, is_none=True) ax.verify_param(\"abc\", not_empty=True) ax.verify_param(\"\", is_empty=True) ax.verify_param(\"abc\", matches=True, param_compare=r\"[a-z]+\")", "# user name doesn't exist utils.check_response_basic_info(resp, expected_code=406, expected_method=\"POST\") else: # invalid username/password credentials", "utils.check_no_raise(lambda: ax.verify_param(\"1\", param_compare=\"1\", is_equal=True)) def test_enum_values_listing(self): utils.check_all_equal(DummyEnum.values(), [\"value-1\", \"value-2\"], any_order=True) def test_enum_get_by_value(self): utils.check_val_equal(DummyEnum.get(\"value-1\"),", "if the real safeguard fails doing its job if mock_calls[\"counter\"] >= 2 *", "if ``is_type`` is not requested, but other flags require some form of comparison", ":func:`test_verify_param_args_incorrect_usage` for invalid input use-cases \"\"\" # compare flags expecting a value (can", "DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"VALUE1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"random\"), None) 
utils.check_val_equal(DummyEnum.get(\"random\", \"something\"), \"something\") def test_enum_other(self): class OtherEnum(ExtendedEnum): VALUE1", "routing with fixed \"MAGPIE_URL\" within the API application. Signin with invalid credentials will", "should **NOT** raise ``HTTPInternalServerError`` because the whole point of the method is to", "above 'test_request' should catch the final 'HTTPInternalServerError' that is # raised directly instead", "utils.check_response_basic_info(resp, expected_code=406, expected_method=\"POST\") else: # invalid username/password credentials utils.check_response_basic_info(resp, expected_code=401, expected_method=\"POST\") def test_get_header_split(self):", "ar.get_query_param(resp, \"value\") utils.check_val_equal(v, None) resp = utils.mock_request(\"/some/path?other=test\") v = ar.get_query_param(resp, \"value\") utils.check_val_equal(v, None)", "the API application. Signin with invalid credentials will call \"/signin\" followed by sub-request", "fails doing its job if mock_calls[\"counter\"] >= 2 * ax.RAISE_RECURSIVE_SAFEGUARD_MAX: return TypeError() mock_calls[\"counter\"]", "of runtime 'unexpected' processing error. 
On the other hand, when ``is_type`` flag is", "in [\"http://localhost\", \"http://localhost/magpie\"]: # paths are reduced (pop in mock) each time a", "param_compare=[\"a\", \"b\"], not_in=None), # noqa HTTPInternalServerError, msg=\"flag specified with incorrect type should be", "non-lambda 'call' should raise\") utils.check_raises(lambda: ax.evaluate_call(lambda: int, fallback=int), # noqa HTTPInternalServerError, msg=\"invalid callable", "and generation of a resulting HTTP response raises itself an error (because of", "internal login _paths = [\"/signin\", \"/signin_internal\"] app = utils.get_test_magpie_app({\"magpie.url\": url}) with mock.patch(\"magpie.api.requests.get_value_multiformat_body_checked\", side_effect=lambda", "param_compare=1, is_in=True), HTTPInternalServerError, msg=\"incorrect non-iterable compare should raise invalid type\") for flag in", "while it is processing another pre-raised error, that it does not end up", "import os import unittest from distutils.version import LooseVersion import mock import six from", "call 'raise_http' again each time operation fails, creating recursive raises. 
# If recursive", "**__: mock_get_post(real_multiform_post_checked, *_, **__)): data = {\"user_name\": \"foo\", \"password\": \"<PASSWORD>\"} headers = {\"Content-Type\":", "ax.verify_param(None, not_none=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(1, is_none=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True, http_error=HTTPForbidden),", "utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\"), HTTPBadRequest) # with requested error utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"],", "v = ar.get_query_param(resp, \"value\") utils.check_val_equal(v, None) resp = utils.mock_request(\"/some/path?other=test\") v = ar.get_query_param(resp, \"value\")", "our counter reached higher than the MAX (i.e.: 2*MAX from mock), the safeguard", "error. On the other hand, when ``is_type`` flag is requested, we know that", "url}) with mock.patch(\"magpie.api.requests.get_value_multiformat_body_checked\", side_effect=lambda *_, **__: mock_get_post(real_multiform_post_checked, *_, **__)): data = {\"user_name\": \"foo\",", "is_empty=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\", http_error=HTTPForbidden), HTTPForbidden) def test_verify_param_proper_verifications_passed(self): ax.verify_param(\"x\", param_compare=[\"a\",", "ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\", http_error=HTTPForbidden), HTTPForbidden) def test_verify_param_proper_verifications_passed(self): ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], not_in=True) ax.verify_param(\"b\", param_compare=[\"a\",", "input utils.check_raises(lambda: ax.verify_param(1, param_compare=int, is_equal=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, is_equal=True), HTTPInternalServerError) # 
compare", "app = utils.get_test_magpie_app() with mock.patch(\"magpie.api.exception.generate_response_http_format\", side_effect=mock_raise): with mock.patch(\"magpie.api.login.login.get_session\", side_effect=mock_lambda_call): # Call request that", "utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") # settings priority over envs url = utils.check_no_raise(lambda:", "only local test def test_magpie_prefix_direct_request(self): base_url = \"http://localhost\" for url in [\"http://localhost\", \"http://localhost/magpie\"]:", "utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], is_in=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int, is_type=True, http_error=HTTPForbidden), HTTPForbidden)", "HTTP response raises itself an error (because of implementation issue), while it is", "for valid comparison, except for ``is_type`` where compare parameter must be the type", "\"is_false\", \"is_type\", \"matches\"]: utils.check_raises(lambda: ax.verify_param(\"x\", **{flag: 1}), HTTPInternalServerError, msg=\"invalid flag '{}' type should", "multiple request routing with fixed \"MAGPIE_URL\" within the API application. 
Signin with invalid", "issue), while it is processing another pre-raised error, that it does not end", "mock.patch(\"magpie.api.requests.get_value_multiformat_body_checked\", side_effect=lambda *_, **__: mock_get_post(real_multiform_post_checked, *_, **__)): data = {\"user_name\": \"foo\", \"password\": \"<PASSWORD>\"}", "args = args[0], args[1:] utils.check_val_equal(request.url, base_url + _paths.pop(0), \"Proxied path should have been", "test_verify_param_proper_verifications_passed(self): ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], not_in=True) ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], is_in=True) ax.verify_param(1, param_compare=int, is_type=True) ax.verify_param(\"x\",", "ax.verify_param(\"1\", param_compare=int, is_type=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1.0, param_compare=six.string_types, is_type=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\", not_equal=True),", "\"b\"], not_in=True) ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], is_in=True) ax.verify_param(1, param_compare=int, is_type=True) ax.verify_param(\"x\", param_compare=six.string_types, is_type=True) ax.verify_param(\"x\",", "exc: self.fail(\"unexpected error during request creation should not raise: {}\".format(exc)) # if our", "mock) each time a post to get the 'password' is called in 'login'", "expected_method=\"POST\") else: # invalid username/password credentials utils.check_response_basic_info(resp, expected_code=401, expected_method=\"POST\") def test_get_header_split(self): headers =", "ends up calling the response formatter via 'evaluate_call' itself raising to # trigger", "doing its job if mock_calls[\"counter\"] >= 2 * ax.RAISE_RECURSIVE_SAFEGUARD_MAX: return TypeError() mock_calls[\"counter\"] +=", "ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], 
is_in=True, http_error=HTTPForbidden), HTTPForbidden)", "be a type if ``is_type`` is not requested, but other flags require some", "utils.check_val_equal(url, \"http://test.com:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"1234\"})) utils.check_val_equal(url, \"http://localhost:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\":", "running all tests. # Always need to provide a settings container (even empty", "utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\"), HTTPBadRequest)", "# paths are reduced (pop in mock) each time a post to get", "ag.guess_target_format(request) utils.check_val_equal(content_type, CONTENT_TYPE_JSON) utils.check_val_equal(where, True) def test_get_magpie_url_defined_or_defaults(self): # Disable constants globals() for every", "def test_enum_values_listing(self): utils.check_all_equal(DummyEnum.values(), [\"value-1\", \"value-2\"], any_order=True) def test_enum_get_by_value(self): utils.check_val_equal(DummyEnum.get(\"value-1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"VALUE1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"random\"),", "utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\", not_equal=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\", is_equal=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda:", "raises. 
# If recursive safeguard does its job, it should end up raising", "ax.verify_param(None, not_none=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, is_none=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\",", "utils.check_val_equal(request.url, base_url + _paths.pop(0), \"Proxied path should have been auto-resolved [URL: {}].\".format(url)) return", "\"something\"), \"something\") def test_enum_other(self): class OtherEnum(ExtendedEnum): VALUE1 = DummyEnum.VALUE1.value # copy internal string", "be a type while value provided is not utils.check_raises(lambda: ax.verify_param(1, param_compare=\"x\", is_type=True), HTTPInternalServerError)", "= utils.mock_request(\"/some/path?other=test\") v = ar.get_query_param(resp, \"value\", True) utils.check_val_equal(v, True) resp = utils.mock_request(\"/some/path?value=test\") v", "get_value_multiformat_body_checked as real_multiform_post_checked base_url = \"http://localhost\" def mock_get_post(real_func, *args, **kwargs): if args[1] !=", "is # raised directly instead of usual TestResponse returned. That error is again", "of utility is raised accordingly. 
\"\"\" utils.check_raises(lambda: ax.evaluate_call(int), HTTPInternalServerError, msg=\"invalid callable non-lambda 'call'", "least more than once, use cases did not really get tested utils.check_val_is_in(mock_calls[\"counter\"], list(range(2,", "'fallback' should raise\") def test_evaluate_call_recursive_safeguard(self): \"\"\" Validate use case if internal function that", "if internal function that handles formatting and generation of a resulting HTTP response", "\"http://localhost/magpie\"]: app = utils.get_test_magpie_app({\"magpie.url\": url}) path = \"/version\" resp = utils.test_request(app, \"GET\", path)", "2.0 Since ``param`` can come from user input, we should **NOT** raise ``HTTPInternalServerError``", "(because of implementation issue), while it is processing another pre-raised error, that it", "HTTPInternalServerError, msg=\"invalid content format expected as JSON serializable should raise\" ) def test_generate_response_http_format_invalid_usage(self):", "should catch the final 'HTTPInternalServerError' that is # raised directly instead of usual", "def test_verify_param_proper_verifications_passed(self): ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], not_in=True) ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], is_in=True) ax.verify_param(1, param_compare=int, is_type=True)", "that is # raised directly instead of usual TestResponse returned. 
That error is", "utils.check_val_equal(DummyEnum.get(\"VALUE1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"random\"), None) utils.check_val_equal(DummyEnum.get(\"random\", \"something\"), \"something\") def test_enum_other(self): class OtherEnum(ExtendedEnum): VALUE1 =", "module # this combination should happen twice, one in signin route and another", "DummyEnum(ExtendedEnum): VALUE1 = \"value-1\" VALUE2 = \"value-2\" @runner.MAGPIE_TEST_LOCAL @runner.MAGPIE_TEST_UTILS class TestUtils(unittest.TestCase): @classmethod def", "utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True,", "by Magpie. \"\"\" import os import unittest from distutils.version import LooseVersion import mock", "url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test-server.com\"})) utils.check_val_equal(url, \"http://test-server.com:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test.com\", \"magpie.port\":", "ar.get_query_param(resp, \"query\") utils.check_val_equal(v, \"VALUE\") resp = utils.mock_request(\"/some/path?QUERY=VALUE\") v = asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v, False)", "CONTENT_TYPE_JSON) def test_get_query_param(self): resp = utils.mock_request(\"/some/path\") v = ar.get_query_param(resp, \"value\") utils.check_val_equal(v, None) resp", "utility is raised accordingly. 
\"\"\" utils.check_raises(lambda: ax.evaluate_call(int), HTTPInternalServerError, msg=\"invalid callable non-lambda 'call' should", "url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") # settings priority over envs url", "ax.verify_param(\"abc\", matches=True, param_compare=r\"[a-z]+\") def test_verify_param_args_incorrect_usage(self): \"\"\" Invalid usage of function raises internal server", "can find the current thread settings generated by any test app with mock.patch.object(constants,", "is_in=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int, is_type=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\", not_equal=True,", "Inversely, ``param_compare`` must not be a type if ``is_type`` is not requested, but", "while value provided is not utils.check_raises(lambda: ax.verify_param(1, param_compare=\"x\", is_type=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=True,", "VALUE2 = \"value-2\" @runner.MAGPIE_TEST_LOCAL @runner.MAGPIE_TEST_UTILS class TestUtils(unittest.TestCase): @classmethod def setUpClass(cls): cls.version = __meta__.__version__", "comparison between values. We evaluate these use cases here. .. 
seealso:: - :func:`test_verify_param_args_incorrect_usage`", "value provided is not utils.check_raises(lambda: ax.verify_param(1, param_compare=1, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=list, is_in=True),", "resp = utils.mock_request(\"/some/path?QUERY=VALUE\") v = asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v, False) resp = utils.mock_request(\"/some/path?Query=TRUE\") v", "utils.check_all_equal(DummyEnum.values(), [\"value-1\", \"value-2\"], any_order=True) def test_enum_get_by_value(self): utils.check_val_equal(DummyEnum.get(\"value-1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"VALUE1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"random\"), None) utils.check_val_equal(DummyEnum.get(\"random\",", "not get called at least more than once, use cases did not really", "utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"]), HTTPInternalServerError, msg=\"missing any flag specification should be caught\") utils.check_raises(lambda:", "TestResponse returned. 
That error is again re-raised as 'AssertionError' pass except Exception as", "param_compare=\"1\", is_equal=True)) def test_enum_values_listing(self): utils.check_all_equal(DummyEnum.values(), [\"value-1\", \"value-2\"], any_order=True) def test_enum_get_by_value(self): utils.check_val_equal(DummyEnum.get(\"value-1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"VALUE1\"),", "HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=None, is_type=True), HTTPInternalServerError) # compare flags expecting param_compare to be", "import asbool from magpie import __meta__, constants from magpie.api import exception as ax", "False) resp = utils.mock_request(\"/some/path?Query=TRUE\") v = asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v, True) def test_verify_param_proper_verifications_raised(self): #", "processing another pre-raised error, that it does not end up into an endless", "ax.verify_param(\"1\", param_compare=int, is_type=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\", not_equal=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\",", "base_url = \"http://localhost\" def mock_get_post(real_func, *args, **kwargs): if args[1] != \"password\": return real_func(*args,", "test_enum_get_by_value(self): utils.check_val_equal(DummyEnum.get(\"value-1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"VALUE1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"random\"), None) utils.check_val_equal(DummyEnum.get(\"random\", \"something\"), \"something\") def test_enum_other(self): class", "param_compare=six.string_types, is_type=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\", not_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\", is_equal=True), HTTPBadRequest)", "**kwargs) request, args = 
args[0], args[1:] utils.check_val_equal(request.url, base_url + _paths.pop(0), \"Proxied path should", "HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\"), HTTPBadRequest) # with requested error utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\",", "final 'HTTPInternalServerError' that is # raised directly instead of usual TestResponse returned. That", "input, we should **NOT** raise ``HTTPInternalServerError`` because the whole point of the method", "an 'expected' validation failure (``HTTPBadRequest`` or whichever ``http_error`` provided) instead of runtime 'unexpected'", "is_in=True) ax.verify_param(1, param_compare=int, is_type=True) ax.verify_param(\"x\", param_compare=six.string_types, is_type=True) ax.verify_param(\"x\", param_compare=str, is_type=True) ax.verify_param(\"x\", param_compare=\"y\", not_equal=True)", "True) resp = utils.mock_request(\"/some/path?value=test\") v = ar.get_query_param(resp, \"value\", True) utils.check_val_equal(v, \"test\") resp =", "an endless recursive call stack of raised errors. 
\"\"\" mock_calls = {\"counter\": 0}", "with missing scheme even if defined with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\", \"MAGPIE_PORT\": \"1234\"}): url", "# when compare flags expect a value but type is provided, should still", "import LooseVersion import mock import six from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPInternalServerError, HTTPOk", "combination should happen twice, one in signin route and another on the redirected", "\"value-2\" @runner.MAGPIE_TEST_LOCAL @runner.MAGPIE_TEST_UTILS class TestUtils(unittest.TestCase): @classmethod def setUpClass(cls): cls.version = __meta__.__version__ # only", "\"Content_Type\", \"Content-Type\", \"CONTENT_TYPE\", \"CONTENT-TYPE\"]: for split in [\";,\", \",;\", \";\", (\",\", \";\"), [\";\",", "\"is_true\", \"is_false\", \"is_type\", \"matches\"]: utils.check_raises(lambda: ax.verify_param(\"x\", **{flag: 1}), HTTPInternalServerError, msg=\"invalid flag '{}' type", "msg=\"incorrect HTTP class to raise error should be caught\") utils.check_raises(lambda: ax.verify_param([1], param_compare=1, is_in=True),", "pass except Exception as exc: self.fail(\"unexpected error during request creation should not raise:", "mock_get_post(real_multiform_post_checked, *_, **__)): data = {\"user_name\": \"foo\", \"password\": \"<PASSWORD>\"} headers = {\"Content-Type\": CONTENT_TYPE_JSON,", "with mock.patch(\"magpie.api.exception.generate_response_http_format\", side_effect=mock_raise): with mock.patch(\"magpie.api.login.login.get_session\", side_effect=mock_lambda_call): # Call request that ends up calling", "function. 
# Since tweens are set up to format all response prior to", "self.fail(\"unexpected error during request creation should not raise: {}\".format(exc)) # if our counter", "it should end up raising 'HTTPInternalServerError' directly # (without further formatting attempt when", "# noqa HTTPInternalServerError, msg=\"invalid arguments resulting in error during response generation should raise\"", "@classmethod def setUpClass(cls): cls.version = __meta__.__version__ # only local test def test_magpie_prefix_direct_request(self): base_url", "\"http://test-server.com:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test.com\", \"magpie.port\": \"1234\"})) utils.check_val_equal(url, \"http://test.com:1234\") url = utils.check_no_raise(lambda:", "provide a settings container (even empty direct when nothing define in settings), #", "1}), HTTPInternalServerError, msg=\"invalid flag '{}' type should be caught\".format(flag)) def test_verify_param_compare_types(self): \"\"\" Arguments", "ax.verify_param(1, param_compare=1, not_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=list, not_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, not_in=True),", "def test_evaluate_call_callable_incorrect_usage(self): \"\"\" Verifies that incorrect usage of utility is raised accordingly. 
\"\"\"", "validation failure (``HTTPBadRequest`` or whichever ``http_error`` provided) instead of runtime 'unexpected' processing error.", "# Disable constants globals() for every case, since it can pre-loaded from .env", "DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"random\"), None) utils.check_val_equal(DummyEnum.get(\"random\", \"something\"), \"something\") def test_enum_other(self): class OtherEnum(ExtendedEnum): VALUE1 = DummyEnum.VALUE1.value", "any_order=True) def test_enum_get_by_value(self): utils.check_val_equal(DummyEnum.get(\"value-1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"VALUE1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"random\"), None) utils.check_val_equal(DummyEnum.get(\"random\", \"something\"), \"something\") def", "param_compare=six.string_types, is_type=True) ax.verify_param(\"x\", param_compare=str, is_type=True) ax.verify_param(\"x\", param_compare=\"y\", not_equal=True) ax.verify_param(\"x\", param_compare=\"x\", is_equal=True) ax.verify_param(True, is_true=True)", "!= \"password\": return real_func(*args, **kwargs) request, args = args[0], args[1:] utils.check_val_equal(request.url, base_url +", "not really get tested utils.check_val_is_in(mock_calls[\"counter\"], list(range(2, ax.RAISE_RECURSIVE_SAFEGUARD_MAX + 1))) # noqa def test_format_content_json_str_invalid_usage(self):", "param_compare=\"x\", not_equal=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\", is_equal=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(False, is_true=True,", "raised accordingly. 
\"\"\" utils.check_raises(lambda: ax.evaluate_call(int), HTTPInternalServerError, msg=\"invalid callable non-lambda 'call' should raise\") utils.check_raises(lambda:", "missing scheme even if defined with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\", \"MAGPIE_PORT\": \"1234\"}): url =", "= utils.get_test_magpie_app({\"magpie.url\": url}) with mock.patch(\"magpie.api.requests.get_value_multiformat_body_checked\", side_effect=lambda *_, **__: mock_get_post(real_multiform_post_checked, *_, **__)): data =", "get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") # settings priority over envs url = utils.check_no_raise(lambda: get_magpie_url({}))", "recursive call stack of raised errors. \"\"\" mock_calls = {\"counter\": 0} def mock_raise(*_,", "True) utils.check_val_equal(v, True) resp = utils.mock_request(\"/some/path?value=test\") v = ar.get_query_param(resp, \"value\", True) utils.check_val_equal(v, \"test\")", "``param_compare`` must be a type. Inversely, ``param_compare`` must not be a type if", "# settings priority over envs url = utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:9871\") # env", "application. Signin with invalid credentials will call \"/signin\" followed by sub-request \"/signin_internal\" and", "requested, but other flags require some form of comparison between values. 
We evaluate", "\"b\"], not_in=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], is_in=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int, is_type=True),", "does not end up into an endless recursive call stack of raised errors.", "not_none=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, is_none=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True),", "should raise invalid type\") for flag in [\"not_none\", \"not_empty\", \"not_in\", \"not_equal\", \"is_none\", \"is_empty\",", "for url in [\"http://localhost\", \"http://localhost/magpie\"]: app = utils.get_test_magpie_app({\"magpie.url\": url}) path = \"/version\" resp", "test_evaluate_call_recursive_safeguard(self): \"\"\" Validate use case if internal function that handles formatting and generation", "def test_enum_get_by_value(self): utils.check_val_equal(DummyEnum.get(\"value-1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"VALUE1\"), DummyEnum.VALUE1) utils.check_val_equal(DummyEnum.get(\"random\"), None) utils.check_val_equal(DummyEnum.get(\"random\", \"something\"), \"something\") def test_enum_other(self):", "utils.check_response_basic_info(resp) utils.check_val_equal(resp.request.url, base_url + path, \"Proxied path should have been auto-resolved [URL: {}].\".format(url))", "= utils.mock_request() content_type, where = ag.guess_target_format(request) utils.check_val_equal(content_type, CONTENT_TYPE_JSON) utils.check_val_equal(where, True) def test_get_magpie_url_defined_or_defaults(self): #", "find the current thread settings generated by any test app with mock.patch.object(constants, \"MAGPIE_URL\",", "callable non-lambda 'call' should raise\") utils.check_raises(lambda: ax.evaluate_call(lambda: int, fallback=int), # noqa HTTPInternalServerError, msg=\"invalid", "= 
utils.test_request(app, \"POST\", _paths[0], json=data, headers=headers, expect_errors=True) if LooseVersion(self.version) < LooseVersion(\"0.10.0\"): # user", "= utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") # ignore port, URL has priority url", "ax from magpie.api import generic as ag from magpie.api import requests as ar", "with incorrect type should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", not_in=True), HTTPInternalServerError, msg=\"missing 'param_compare' for", "of usual TestResponse returned. That error is again re-raised as 'AssertionError' pass except", "**__)): data = {\"user_name\": \"foo\", \"password\": \"<PASSWORD>\"} headers = {\"Content-Type\": CONTENT_TYPE_JSON, \"Accept\": CONTENT_TYPE_JSON}", "\"b\"], is_in=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int, is_type=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1.0, param_compare=six.string_types, is_type=True), HTTPBadRequest)", "error to be raised is an 'expected' validation failure (``HTTPBadRequest`` or whichever ``http_error``", "ax.verify_param(True, is_true=True) ax.verify_param(False, is_false=True) ax.verify_param(1, not_none=True) ax.verify_param(None, is_none=True) ax.verify_param(\"abc\", not_empty=True) ax.verify_param(\"\", is_empty=True) ax.verify_param(\"abc\",", "know that ``param_compare`` must be a type. 
Inversely, ``param_compare`` must not be a", "is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_equal=True), HTTPBadRequest) # when compare flags expect a", "(can only consider it bad request because comparison values are valid) utils.check_raises(lambda: ax.verify_param(\"1\",", "\"value\", True) utils.check_val_equal(v, True) resp = utils.mock_request(\"/some/path?value=test\") v = ar.get_query_param(resp, \"value\", True) utils.check_val_equal(v,", "HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\", http_error=HTTPForbidden), HTTPForbidden) def test_verify_param_proper_verifications_passed(self): ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], not_in=True)", "except AssertionError: # Request called with above 'test_request' should catch the final 'HTTPInternalServerError'", "ax.verify_param(\"1\", param_compare=True, is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, param_compare=\"1\", is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_equal=True),", "formatter via 'evaluate_call' itself raising to # trigger 'mock_raise' recursively within 'raise_http' function.", "avoid raising forever if the real safeguard fails doing its job if mock_calls[\"counter\"]", "+ _paths.pop(0), \"Proxied path should have been auto-resolved [URL: {}].\".format(url)) return real_func(request, *args,", "usage of utility is raised accordingly. 
\"\"\" utils.check_raises(lambda: ax.evaluate_call(int), HTTPInternalServerError, msg=\"invalid callable non-lambda", "name in [\"content_type\", \"content-type\", \"Content_Type\", \"Content-Type\", \"CONTENT_TYPE\", \"CONTENT-TYPE\"]: for split in [\";,\", \",;\",", "as real_multiform_post_checked base_url = \"http://localhost\" def mock_get_post(real_func, *args, **kwargs): if args[1] != \"password\":", "get the 'password' is called in 'login' module # this combination should happen", "def test_verify_param_args_incorrect_usage(self): \"\"\" Invalid usage of function raises internal server error instead of", "\"test.com\", \"magpie.port\": \"1234\"})) utils.check_val_equal(url, \"http://test.com:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"1234\"})) utils.check_val_equal(url, \"http://localhost:1234\") url", "usual TestResponse returned. That error is again re-raised as 'AssertionError' pass except Exception", "utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\", is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(False, is_true=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(True, is_false=True), HTTPBadRequest)", "error will itself # call 'raise_http' again each time operation fails, creating recursive", "is requested, we know that ``param_compare`` must be a type. 
Inversely, ``param_compare`` must", "= utils.mock_request(\"/some/path?QUERY=VALUE\") v = asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v, False) resp = utils.mock_request(\"/some/path?Query=TRUE\") v =", "as JSON serializable should raise\" ) def test_generate_response_http_format_invalid_usage(self): utils.check_raises( lambda: ax.generate_response_http_format(None, {}, {},", "= utils.mock_request(\"/some/path?value=test\") v = ar.get_query_param(resp, \"value\", True) utils.check_val_equal(v, \"test\") resp = utils.mock_request(\"/some/path?query=value\") v", "redirected internal login _paths = [\"/signin\", \"/signin_internal\"] app = utils.get_test_magpie_app({\"magpie.url\": url}) with mock.patch(\"magpie.api.requests.get_value_multiformat_body_checked\",", "compare flags expecting param_compare to be a type while value provided is not", "param_compare=None, is_type=True), HTTPInternalServerError) # compare flags expecting param_compare to be some container instance", "prior to return, the raised error will itself # call 'raise_http' again each", "utils.mock_request() content_type, where = ag.guess_target_format(request) utils.check_val_equal(content_type, CONTENT_TYPE_JSON) utils.check_val_equal(where, True) def test_get_magpie_url_defined_or_defaults(self): # Disable", "utils.check_val_equal(url, \"https://test-server.com\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"localhost\"})) utils.check_val_equal(url, \"http://localhost:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\":", "correctly (no raise) utils.check_no_raise(lambda: ax.verify_param(\"1\", param_compare=\"1\", is_equal=True)) def test_enum_values_listing(self): utils.check_all_equal(DummyEnum.values(), [\"value-1\", \"value-2\"], any_order=True)", "# compare flags expecting param_compare to be some container instance while value provided", "to get the 'password' is called in 'login' module # this combination should", "\";\", 
(\",\", \";\"), [\";\", \",\"]]: utils.check_val_equal(get_header(name, headers, split=split), CONTENT_TYPE_JSON) def test_get_query_param(self): resp =", "``HTTPInternalServerError`` because the whole point of the method is to ensure that values", "function that handles formatting and generation of a resulting HTTP response raises itself", "hand, when ``is_type`` flag is requested, we know that ``param_compare`` must be a", "ax.verify_param(\"abc\", is_empty=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\"), HTTPBadRequest) # with requested error utils.check_raises(lambda:", "url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"server\"})) # ignored, URL priority utils.check_val_equal(url, \"http://localhost:9871\") # URL", "v = asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v, True) def test_verify_param_proper_verifications_raised(self): # with default error utils.check_raises(lambda:", "in [\";,\", \",;\", \";\", (\",\", \";\"), [\";\", \",\"]]: utils.check_val_equal(get_header(name, headers, split=split), CONTENT_TYPE_JSON) def", "type should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", not_in=True), HTTPInternalServerError, msg=\"missing 'param_compare' for flag needing", "generation of a resulting HTTP response raises itself an error (because of implementation", "utf-8 -*- \"\"\" test_utils ---------------------------------- Tests for the various utility operations employed by", "is_in=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int, is_type=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1.0, param_compare=six.string_types, is_type=True), HTTPBadRequest) utils.check_raises(lambda:", "should raise\") utils.check_raises(lambda: ax.evaluate_call(lambda: int, fallback=int), # noqa HTTPInternalServerError, msg=\"invalid callable non-lambda 'fallback'", "server error instead of 'normal 
HTTP error'. \"\"\" utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"]), HTTPInternalServerError,", "as ag from magpie.api import requests as ar from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum,", "caught\") utils.check_raises(lambda: ax.verify_param([1], param_compare=1, is_in=True), HTTPInternalServerError, msg=\"incorrect non-iterable compare should raise invalid type\")", "magpie.api import generic as ag from magpie.api import requests as ar from magpie.utils", "HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, is_equal=True), HTTPInternalServerError) # compare flags expecting param_compare to be", "generation should raise\" ) def test_guess_target_format_default(self): request = utils.mock_request() content_type, where = ag.guess_target_format(request)", "else: # invalid username/password credentials utils.check_response_basic_info(resp, expected_code=401, expected_method=\"POST\") def test_get_header_split(self): headers = {\"Content-Type\":", "param_compare=str, is_equal=True), HTTPInternalServerError) # compare flags expecting param_compare to be a type while", "get_magpie_url({\"magpie.host\": \"server\"})) # ignored, URL priority utils.check_val_equal(url, \"http://localhost:9871\") # URL fixed with missing", "returned. 
That error is again re-raised as 'AssertionError' pass except Exception as exc:", "utils.check_val_equal(url, \"http://localhost:9871\") # env URL found if not in settings url = utils.check_no_raise(lambda:", "path should have been auto-resolved [URL: {}].\".format(url)) def test_magpie_prefix_request_with_multiple_route_url(self): \"\"\" Test multiple request", "login _paths = [\"/signin\", \"/signin_internal\"] app = utils.get_test_magpie_app({\"magpie.url\": url}) with mock.patch(\"magpie.api.requests.get_value_multiformat_body_checked\", side_effect=lambda *_,", "\"GET\", \"/session\", expect_errors=True) except AssertionError: # Request called with above 'test_request' should catch", "HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=True, is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, param_compare=\"1\", is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1,", "test_verify_param_proper_verifications_raised(self): # with default error utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\",", "CONTENT_TYPE_JSON} resp = utils.test_request(app, \"POST\", _paths[0], json=data, headers=headers, expect_errors=True) if LooseVersion(self.version) < LooseVersion(\"0.10.0\"):", "from tests import runner, utils class DummyEnum(ExtendedEnum): VALUE1 = \"value-1\" VALUE2 = \"value-2\"", "current thread settings generated by any test app with mock.patch.object(constants, \"MAGPIE_URL\", None): with", "get_magpie_url from tests import runner, utils class DummyEnum(ExtendedEnum): VALUE1 = \"value-1\" VALUE2 =", "expect_errors=True) except AssertionError: # Request called with above 'test_request' should catch the final", "ax.verify_param(True, is_false=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(None, not_none=True, http_error=HTTPForbidden), 
HTTPForbidden) utils.check_raises(lambda: ax.verify_param(1, is_none=True, http_error=HTTPForbidden),", "function raises internal server error instead of 'normal HTTP error'. \"\"\" utils.check_raises(lambda: ax.verify_param(\"b\",", "another pre-raised error, that it does not end up into an endless recursive", "HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\",", "internal string representation utils.check_val_not_equal(DummyEnum.VALUE1, OtherEnum.VALUE1, msg=\"concrete enum elements should be different\") def test_evaluate_call_callable_incorrect_usage(self):", "\"\"\" utils.check_raises(lambda: ax.evaluate_call(int), HTTPInternalServerError, msg=\"invalid callable non-lambda 'call' should raise\") utils.check_raises(lambda: ax.evaluate_call(lambda: int,", "is provided, should still detect incorrect input utils.check_raises(lambda: ax.verify_param(1, param_compare=int, is_equal=True), HTTPInternalServerError) utils.check_raises(lambda:", "other hand, when ``is_type`` flag is requested, we know that ``param_compare`` must be", "test_magpie_prefix_direct_request(self): base_url = \"http://localhost\" for url in [\"http://localhost\", \"http://localhost/magpie\"]: app = utils.get_test_magpie_app({\"magpie.url\": url})", "param_compare=True, is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, param_compare=\"1\", is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_equal=True), HTTPBadRequest)", "0} def mock_raise(*_, **__): # avoid raising forever if the real safeguard fails", "ax.verify_param(1, param_compare=int, is_type=True) ax.verify_param(\"x\", param_compare=six.string_types, is_type=True) ax.verify_param(\"x\", param_compare=str, is_type=True) 
ax.verify_param(\"x\", param_compare=\"y\", not_equal=True) ax.verify_param(\"x\",", "when running all tests. # Always need to provide a settings container (even", "HTTPBadRequest) # with requested error utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda:", "HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], is_in=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=int, is_type=True), HTTPBadRequest) utils.check_raises(lambda:", "Magpie. \"\"\" import os import unittest from distutils.version import LooseVersion import mock import", "resp = utils.mock_request(\"/some/path?Query=TRUE\") v = asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v, True) def test_verify_param_proper_verifications_raised(self): # with", "utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_equal=True), HTTPBadRequest) # when compare flags expect a value but", "otherwise 'get_constant' can find the current thread settings generated by any test app", "param_compare=list, not_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, not_in=True), HTTPInternalServerError) # strings cases handled correctly", "import get_value_multiformat_body_checked as real_multiform_post_checked base_url = \"http://localhost\" def mock_get_post(real_func, *args, **kwargs): if args[1]", "\"\", \"MAGPIE_PORT\": \"1234\"}): url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") # ignore port,", "type for valid comparison, except for ``is_type`` where compare parameter must be the", "ar.get_query_param(resp, \"query\") utils.check_val_equal(v, \"value\") resp = utils.mock_request(\"/some/path?QUERY=VALUE\") v = ar.get_query_param(resp, \"query\") 
utils.check_val_equal(v, \"VALUE\")", "from magpie.api import generic as ag from magpie.api import requests as ar from", "utils.mock_request(\"/some/path?Query=TRUE\") v = asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v, True) def test_verify_param_proper_verifications_raised(self): # with default error", "utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=None, is_type=True), HTTPInternalServerError) # compare flags expecting param_compare to be some", "response prior to return, the raised error will itself # call 'raise_http' again", "noqa HTTPInternalServerError, msg=\"flag specified with incorrect type should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", not_in=True),", "between values. We evaluate these use cases here. .. seealso:: - :func:`test_verify_param_args_incorrect_usage` for", "def setUpClass(cls): cls.version = __meta__.__version__ # only local test def test_magpie_prefix_direct_request(self): base_url =", "is again re-raised as 'AssertionError' pass except Exception as exc: self.fail(\"unexpected error during", "[URL: {}].\".format(url)) return real_func(request, *args, **kwargs) for url in [\"http://localhost\", \"http://localhost/magpie\"]: # paths", "= args[0], args[1:] utils.check_val_equal(request.url, base_url + _paths.pop(0), \"Proxied path should have been auto-resolved", "are set up to format all response prior to return, the raised error", "be of same type for valid comparison, except for ``is_type`` where compare parameter", "utility operations employed by Magpie. \"\"\" import os import unittest from distutils.version import", "formatting attempt when reaching the MAX value), stopping the endless loop. 
utils.test_request(app, \"GET\",", "handled correctly (no raise) utils.check_no_raise(lambda: ax.verify_param(\"1\", param_compare=\"1\", is_equal=True)) def test_enum_values_listing(self): utils.check_all_equal(DummyEnum.values(), [\"value-1\", \"value-2\"],", "with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\"}): url = utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:2001\") url = utils.check_no_raise(lambda:", "magpie.api import requests as ar from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url from", "ax.verify_param(\"\", is_empty=True) ax.verify_param(\"abc\", matches=True, param_compare=r\"[a-z]+\") def test_verify_param_args_incorrect_usage(self): \"\"\" Invalid usage of function raises", "from magpie.api.requests import get_value_multiformat_body_checked as real_multiform_post_checked base_url = \"http://localhost\" def mock_get_post(real_func, *args, **kwargs):", "from mock), the safeguard did not do its job # if it did", "args[1:] utils.check_val_equal(request.url, base_url + _paths.pop(0), \"Proxied path should have been auto-resolved [URL: {}].\".format(url))", "whichever ``http_error`` provided) instead of runtime 'unexpected' processing error. 
On the other hand,", "catch the final 'HTTPInternalServerError' that is # raised directly instead of usual TestResponse", "at least more than once, use cases did not really get tested utils.check_val_is_in(mock_calls[\"counter\"],", "settings container (even empty direct when nothing define in settings), # otherwise 'get_constant'", "is_type=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1.0, param_compare=six.string_types, is_type=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\", not_equal=True), HTTPBadRequest) utils.check_raises(lambda:", "should happen twice, one in signin route and another on the redirected internal", "ax.verify_param([1], param_compare=1, is_in=True), HTTPInternalServerError, msg=\"incorrect non-iterable compare should raise invalid type\") for flag", "utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=1, not_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=list,", "will itself # call 'raise_http' again each time operation fails, creating recursive raises.", "from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPInternalServerError, HTTPOk from pyramid.settings import asbool from magpie", "{\"user_name\": \"foo\", \"password\": \"<PASSWORD>\"} headers = {\"Content-Type\": CONTENT_TYPE_JSON, \"Accept\": CONTENT_TYPE_JSON} resp = utils.test_request(app,", "while value provided is not utils.check_raises(lambda: ax.verify_param(1, param_compare=1, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=list,", "HTTPInternalServerError, msg=\"missing any flag specification should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=None),", "\",;\", \";\", (\",\", \";\"), [\";\", \",\"]]: 
utils.check_val_equal(get_header(name, headers, split=split), CONTENT_TYPE_JSON) def test_get_query_param(self): resp", "to provide a settings container (even empty direct when nothing define in settings),", "forever if the real safeguard fails doing its job if mock_calls[\"counter\"] >= 2", "with fixed \"MAGPIE_URL\" within the API application. Signin with invalid credentials will call", "compare flags expect a value but type is provided, should still detect incorrect", "param_compare=r\"[A-Z]+\"), HTTPBadRequest) # with requested error utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=True, http_error=HTTPForbidden), HTTPForbidden)", "attempt when reaching the MAX value), stopping the endless loop. utils.test_request(app, \"GET\", \"/session\",", "ar.get_query_param(resp, \"value\", True) utils.check_val_equal(v, \"test\") resp = utils.mock_request(\"/some/path?query=value\") v = ar.get_query_param(resp, \"query\") utils.check_val_equal(v,", "json=data, headers=headers, expect_errors=True) if LooseVersion(self.version) < LooseVersion(\"0.10.0\"): # user name doesn't exist utils.check_response_basic_info(resp,", "'get_constant' can find the current thread settings generated by any test app with", "expecting a value (can only consider it bad request because comparison values are", "HTTPForbidden, HTTPInternalServerError, HTTPOk from pyramid.settings import asbool from magpie import __meta__, constants from", "\"query\") utils.check_val_equal(v, \"value\") resp = utils.mock_request(\"/some/path?QUERY=VALUE\") v = ar.get_query_param(resp, \"query\") utils.check_val_equal(v, \"VALUE\") resp", "not utils.check_raises(lambda: ax.verify_param(1, param_compare=\"x\", is_type=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_type=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\",", "a post to get the 'password' is called in 'login' module # this", "{}), # 
noqa HTTPInternalServerError, msg=\"invalid arguments resulting in error during response generation should", "param_compare=r\"[a-z]+\") def test_verify_param_args_incorrect_usage(self): \"\"\" Invalid usage of function raises internal server error instead", "headers, split=split), CONTENT_TYPE_JSON) def test_get_query_param(self): resp = utils.mock_request(\"/some/path\") v = ar.get_query_param(resp, \"value\") utils.check_val_equal(v,", "utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"1234\"})) utils.check_val_equal(url, \"http://localhost:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"9000\", \"magpie.scheme\": \"https\"})) utils.check_val_equal(url,", "instead of runtime 'unexpected' processing error. On the other hand, when ``is_type`` flag", "request routing with fixed \"MAGPIE_URL\" within the API application. Signin with invalid credentials", "when reaching the MAX value), stopping the endless loop. utils.test_request(app, \"GET\", \"/session\", expect_errors=True)", "not_in=True), HTTPInternalServerError, msg=\"missing 'param_compare' for flag needing it should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\",", "\"value\") resp = utils.mock_request(\"/some/path?QUERY=VALUE\") v = ar.get_query_param(resp, \"query\") utils.check_val_equal(v, \"VALUE\") resp = utils.mock_request(\"/some/path?QUERY=VALUE\")", "can pre-loaded from .env when running all tests. 
# Always need to provide", "utils.get_test_magpie_app({\"magpie.url\": url}) path = \"/version\" resp = utils.test_request(app, \"GET\", path) utils.check_response_basic_info(resp) utils.check_val_equal(resp.request.url, base_url", "utils.get_test_magpie_app({\"magpie.url\": url}) with mock.patch(\"magpie.api.requests.get_value_multiformat_body_checked\", side_effect=lambda *_, **__: mock_get_post(real_multiform_post_checked, *_, **__)): data = {\"user_name\":", "not_empty=True) ax.verify_param(\"\", is_empty=True) ax.verify_param(\"abc\", matches=True, param_compare=r\"[a-z]+\") def test_verify_param_args_incorrect_usage(self): \"\"\" Invalid usage of function", "all response prior to return, the raised error will itself # call 'raise_http'", "VALUE1 = \"value-1\" VALUE2 = \"value-2\" @runner.MAGPIE_TEST_LOCAL @runner.MAGPIE_TEST_UTILS class TestUtils(unittest.TestCase): @classmethod def setUpClass(cls):", "\"matches\"]: utils.check_raises(lambda: ax.verify_param(\"x\", **{flag: 1}), HTTPInternalServerError, msg=\"invalid flag '{}' type should be caught\".format(flag))", "param_compare=True, is_type=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=None, is_type=True), HTTPInternalServerError) # compare flags expecting param_compare", "handles formatting and generation of a resulting HTTP response raises itself an error", "url = utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\")", "non_json_serializable_content, CONTENT_TYPE_JSON), HTTPInternalServerError, msg=\"invalid content format expected as JSON serializable should raise\" )", "\"/signin_internal\" and finally \"ZigguratSignInBadAuth\". Both \"/signin\" and \"ZigguratSignInBadAuth\" use \"get_multiformat_body\". 
\"\"\" from magpie.api.requests", "is_type=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=None, is_type=True), HTTPInternalServerError) # compare flags expecting param_compare to", "param_compare=[\"a\", \"b\"]), HTTPInternalServerError, msg=\"missing any flag specification should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\",", "side_effect=lambda *_, **__: mock_get_post(real_multiform_post_checked, *_, **__)): data = {\"user_name\": \"foo\", \"password\": \"<PASSWORD>\"} headers", "error during request creation should not raise: {}\".format(exc)) # if our counter reached", "copy internal string representation utils.check_val_not_equal(DummyEnum.VALUE1, OtherEnum.VALUE1, msg=\"concrete enum elements should be different\") def", "HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1.0, param_compare=six.string_types, is_type=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\", not_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\",", "\"\"\" Invalid usage of function raises internal server error instead of 'normal HTTP", "HTTPInternalServerError()} utils.check_raises( lambda: ax.format_content_json_str(200, \"\", non_json_serializable_content, CONTENT_TYPE_JSON), HTTPInternalServerError, msg=\"invalid content format expected as", "invalid credentials will call \"/signin\" followed by sub-request \"/signin_internal\" and finally \"ZigguratSignInBadAuth\". Both", "-*- coding: utf-8 -*- \"\"\" test_utils ---------------------------------- Tests for the various utility operations", "raising to # trigger 'mock_raise' recursively within 'raise_http' function. 
# Since tweens are", "def test_verify_param_proper_verifications_raised(self): # with default error utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=True), HTTPBadRequest) utils.check_raises(lambda:", "arguments resulting in error during response generation should raise\" ) def test_guess_target_format_default(self): request", "ignore port, URL has priority url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"server\"})) utils.check_val_equal(url, \"http://server:1234\") url", ".. versionchanged:: 2.0 Since ``param`` can come from user input, we should **NOT**", "operation fails, creating recursive raises. # If recursive safeguard does its job, it", "mock.patch(\"magpie.api.login.login.get_session\", side_effect=mock_lambda_call): # Call request that ends up calling the response formatter via", "be caught\") utils.check_raises(lambda: ax.verify_param([1], param_compare=1, is_in=True), HTTPInternalServerError, msg=\"incorrect non-iterable compare should raise invalid", "elements should be different\") def test_evaluate_call_callable_incorrect_usage(self): \"\"\" Verifies that incorrect usage of utility", "type directly. .. 
versionchanged:: 2.0 Since ``param`` can come from user input, we", "HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=list, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1,", "ax.verify_param(\"b\", param_compare=[\"b\"], not_in=True, http_error=HTTPOk), # noqa HTTPInternalServerError, msg=\"incorrect HTTP class to raise error", "utils.check_raises(lambda: ax.verify_param(1, param_compare=1, not_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=list, not_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str,", "utils.check_raises(lambda: ax.evaluate_call(int), HTTPInternalServerError, msg=\"invalid callable non-lambda 'call' should raise\") utils.check_raises(lambda: ax.evaluate_call(lambda: int, fallback=int),", "\"is_type\", \"matches\"]: utils.check_raises(lambda: ax.verify_param(\"x\", **{flag: 1}), HTTPInternalServerError, msg=\"invalid flag '{}' type should be", "and finally \"ZigguratSignInBadAuth\". Both \"/signin\" and \"ZigguratSignInBadAuth\" use \"get_multiformat_body\". \"\"\" from magpie.api.requests import", "itself an error (because of implementation issue), while it is processing another pre-raised", "values are compared accordingly in a controlled fashion. Therefore, error to be raised", "'raise_http' again each time operation fails, creating recursive raises. # If recursive safeguard", "further formatting attempt when reaching the MAX value), stopping the endless loop. 
utils.test_request(app,", "\"\"\" from magpie.api.requests import get_value_multiformat_body_checked as real_multiform_post_checked base_url = \"http://localhost\" def mock_get_post(real_func, *args,", "reached higher than the MAX (i.e.: 2*MAX from mock), the safeguard did not", "os import unittest from distutils.version import LooseVersion import mock import six from pyramid.httpexceptions", "v = ar.get_query_param(resp, \"value\", True) utils.check_val_equal(v, True) resp = utils.mock_request(\"/some/path?value=test\") v = ar.get_query_param(resp,", "if defined with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\", \"MAGPIE_PORT\": \"1234\"}): url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"}))", "that handles formatting and generation of a resulting HTTP response raises itself an", "be raised is an 'expected' validation failure (``HTTPBadRequest`` or whichever ``http_error`` provided) instead", "mock_calls[\"counter\"] >= 2 * ax.RAISE_RECURSIVE_SAFEGUARD_MAX: return TypeError() mock_calls[\"counter\"] += 1 raise TypeError() def", "utils.check_val_equal(url, \"http://test-server.com:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test.com\", \"magpie.port\": \"1234\"})) utils.check_val_equal(url, \"http://test.com:1234\") url =", "error utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], is_in=True,", "with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\", \"MAGPIE_PORT\": \"1234\"}): url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\")", "processing error. 
On the other hand, when ``is_type`` flag is requested, we know", "HTTPInternalServerError, msg=\"flag specified with incorrect type should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", not_in=True), HTTPInternalServerError,", "= asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v, True) def test_verify_param_proper_verifications_raised(self): # with default error utils.check_raises(lambda: ax.verify_param(\"b\",", "real_func(request, *args, **kwargs) for url in [\"http://localhost\", \"http://localhost/magpie\"]: # paths are reduced (pop", "'param_compare' for flag needing it should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"b\"], not_in=True, http_error=HTTPOk),", "invalid type\") for flag in [\"not_none\", \"not_empty\", \"not_in\", \"not_equal\", \"is_none\", \"is_empty\", \"is_in\", \"is_equal\",", "utils.check_val_equal(DummyEnum.get(\"random\"), None) utils.check_val_equal(DummyEnum.get(\"random\", \"something\"), \"something\") def test_enum_other(self): class OtherEnum(ExtendedEnum): VALUE1 = DummyEnum.VALUE1.value #", "\"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") # settings priority over envs url = utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url,", "ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\"), HTTPBadRequest) # with requested error utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=True,", "credentials utils.check_response_basic_info(resp, expected_code=401, expected_method=\"POST\") def test_get_header_split(self): headers = {\"Content-Type\": \"{}; charset=UTF-8\".format(CONTENT_TYPE_JSON)} for name", "(``HTTPBadRequest`` or whichever ``http_error`` provided) instead of runtime 'unexpected' processing error. 
On the", "\"is_in\", \"is_equal\", \"is_true\", \"is_false\", \"is_type\", \"matches\"]: utils.check_raises(lambda: ax.verify_param(\"x\", **{flag: 1}), HTTPInternalServerError, msg=\"invalid flag", "we should **NOT** raise ``HTTPInternalServerError`` because the whole point of the method is", "mock.patch.object(constants, \"MAGPIE_URL\", None): with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\"}): url = utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:2001\")", "non-iterable compare should raise invalid type\") for flag in [\"not_none\", \"not_empty\", \"not_in\", \"not_equal\",", "None) utils.check_val_equal(DummyEnum.get(\"random\", \"something\"), \"something\") def test_enum_other(self): class OtherEnum(ExtendedEnum): VALUE1 = DummyEnum.VALUE1.value # copy", "{\"MAGPIE_URL\": \"localhost:9871\"}): url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") # settings priority over", "strings cases handled correctly (no raise) utils.check_no_raise(lambda: ax.verify_param(\"1\", param_compare=\"1\", is_equal=True)) def test_enum_values_listing(self): utils.check_all_equal(DummyEnum.values(),", "ax.verify_param(\"x\", param_compare=\"y\", is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(False, is_true=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(True, is_false=True), HTTPBadRequest) utils.check_raises(lambda:", "of a resulting HTTP response raises itself an error (because of implementation issue),", "be the type directly. .. 
versionchanged:: 2.0 Since ``param`` can come from user", "\"password\": return real_func(*args, **kwargs) request, args = args[0], args[1:] utils.check_val_equal(request.url, base_url + _paths.pop(0),", "is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=1, not_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=list, not_in=True), HTTPInternalServerError) utils.check_raises(lambda:", "is_empty=True) ax.verify_param(\"abc\", matches=True, param_compare=r\"[a-z]+\") def test_verify_param_args_incorrect_usage(self): \"\"\" Invalid usage of function raises internal", "msg=\"invalid callable non-lambda 'fallback' should raise\") def test_evaluate_call_recursive_safeguard(self): \"\"\" Validate use case if", "to be a type while value provided is not utils.check_raises(lambda: ax.verify_param(1, param_compare=\"x\", is_type=True),", "of 'normal HTTP error'. \"\"\" utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"]), HTTPInternalServerError, msg=\"missing any flag", "\"CONTENT_TYPE\", \"CONTENT-TYPE\"]: for split in [\";,\", \",;\", \";\", (\",\", \";\"), [\";\", \",\"]]: utils.check_val_equal(get_header(name,", "raised is an 'expected' validation failure (``HTTPBadRequest`` or whichever ``http_error`` provided) instead of", "enum elements should be different\") def test_evaluate_call_callable_incorrect_usage(self): \"\"\" Verifies that incorrect usage of", "param_compare=int, is_equal=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, is_equal=True), HTTPInternalServerError) # compare flags expecting param_compare", "be some container instance while value provided is not utils.check_raises(lambda: ax.verify_param(1, param_compare=1, is_in=True),", "runner, utils class DummyEnum(ExtendedEnum): VALUE1 = \"value-1\" VALUE2 = \"value-2\" @runner.MAGPIE_TEST_LOCAL @runner.MAGPIE_TEST_UTILS class", 
"utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test.com\", \"magpie.port\": \"1234\"})) utils.check_val_equal(url, \"http://test.com:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"1234\"})) utils.check_val_equal(url,", "ax.verify_param(\"b\", param_compare=[\"a\", \"b\"]), HTTPInternalServerError, msg=\"missing any flag specification should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\",", "resp = utils.test_request(app, \"POST\", _paths[0], json=data, headers=headers, expect_errors=True) if LooseVersion(self.version) < LooseVersion(\"0.10.0\"): #", "resp = utils.mock_request(\"/some/path\") v = ar.get_query_param(resp, \"value\") utils.check_val_equal(v, None) resp = utils.mock_request(\"/some/path?other=test\") v", "utils.check_val_equal(DummyEnum.get(\"random\", \"something\"), \"something\") def test_enum_other(self): class OtherEnum(ExtendedEnum): VALUE1 = DummyEnum.VALUE1.value # copy internal", "by sub-request \"/signin_internal\" and finally \"ZigguratSignInBadAuth\". Both \"/signin\" and \"ZigguratSignInBadAuth\" use \"get_multiformat_body\". \"\"\"", "should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"b\"], not_in=True, http_error=HTTPOk), # noqa HTTPInternalServerError, msg=\"incorrect HTTP", "If recursive safeguard does its job, it should end up raising 'HTTPInternalServerError' directly", "utils.check_raises(lambda: ax.verify_param(None, not_none=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(1, is_none=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True,", "ax.verify_param(1, is_none=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\",", "recursive raises. 
# If recursive safeguard does its job, it should end up", "'AssertionError' pass except Exception as exc: self.fail(\"unexpected error during request creation should not", "1))) # noqa def test_format_content_json_str_invalid_usage(self): non_json_serializable_content = {\"key\": HTTPInternalServerError()} utils.check_raises( lambda: ax.format_content_json_str(200, \"\",", "the various utility operations employed by Magpie. \"\"\" import os import unittest from", "msg=\"invalid flag '{}' type should be caught\".format(flag)) def test_verify_param_compare_types(self): \"\"\" Arguments ``param`` and", "an error (because of implementation issue), while it is processing another pre-raised error,", "{}\".format(exc)) # if our counter reached higher than the MAX (i.e.: 2*MAX from", "job if mock_calls[\"counter\"] >= 2 * ax.RAISE_RECURSIVE_SAFEGUARD_MAX: return TypeError() mock_calls[\"counter\"] += 1 raise", "call \"/signin\" followed by sub-request \"/signin_internal\" and finally \"ZigguratSignInBadAuth\". 
Both \"/signin\" and \"ZigguratSignInBadAuth\"", "flag '{}' type should be caught\".format(flag)) def test_verify_param_compare_types(self): \"\"\" Arguments ``param`` and ``param_compare``", "ExtendedEnum, get_header, get_magpie_url from tests import runner, utils class DummyEnum(ExtendedEnum): VALUE1 = \"value-1\"", "test def test_magpie_prefix_direct_request(self): base_url = \"http://localhost\" for url in [\"http://localhost\", \"http://localhost/magpie\"]: app =", "in error during response generation should raise\" ) def test_guess_target_format_default(self): request = utils.mock_request()", "settings generated by any test app with mock.patch.object(constants, \"MAGPIE_URL\", None): with mock.patch.dict(os.environ, {\"MAGPIE_URL\":", "direct when nothing define in settings), # otherwise 'get_constant' can find the current", "# ignored, URL priority utils.check_val_equal(url, \"http://localhost:9871\") # URL fixed with missing scheme even", "is_type=True), HTTPInternalServerError) # compare flags expecting param_compare to be some container instance while", "magpie import __meta__, constants from magpie.api import exception as ax from magpie.api import", "directly # (without further formatting attempt when reaching the MAX value), stopping the", "internal function that handles formatting and generation of a resulting HTTP response raises", "HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\", is_equal=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(False, is_true=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda:", "caught\") utils.check_raises(lambda: ax.verify_param(\"b\", not_in=True), HTTPInternalServerError, msg=\"missing 'param_compare' for flag needing it should be", "settings priority over envs url = utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:9871\") # env URL", "implementation issue), while it is 
processing another pre-raised error, that it does not", "is_type=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_type=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=None, is_type=True), HTTPInternalServerError) #", "HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=list, not_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, not_in=True), HTTPInternalServerError) # strings", "nothing define in settings), # otherwise 'get_constant' can find the current thread settings", "HTTPInternalServerError, msg=\"invalid callable non-lambda 'fallback' should raise\") def test_evaluate_call_recursive_safeguard(self): \"\"\" Validate use case", "must be of same type for valid comparison, except for ``is_type`` where compare", "import exception as ax from magpie.api import generic as ag from magpie.api import", "Tests for the various utility operations employed by Magpie. 
\"\"\" import os import", "if it did not get called at least more than once, use cases", "invalid input use-cases \"\"\" # compare flags expecting a value (can only consider", "ax.verify_param(\"1\", param_compare=str, not_in=True), HTTPInternalServerError) # strings cases handled correctly (no raise) utils.check_no_raise(lambda: ax.verify_param(\"1\",", "get_magpie_url({\"magpie.host\": \"test-server.com\"})) utils.check_val_equal(url, \"http://test-server.com:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test.com\", \"magpie.port\": \"1234\"})) utils.check_val_equal(url, \"http://test.com:1234\")", "fallback=int), # noqa HTTPInternalServerError, msg=\"invalid callable non-lambda 'fallback' should raise\") def test_evaluate_call_recursive_safeguard(self): \"\"\"", "of same type for valid comparison, except for ``is_type`` where compare parameter must", "utils.check_val_equal(url, \"https://test-server.com\") # settings priority over envs url = utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:9871\")", "VALUE1 = DummyEnum.VALUE1.value # copy internal string representation utils.check_val_not_equal(DummyEnum.VALUE1, OtherEnum.VALUE1, msg=\"concrete enum elements", "{}].\".format(url)) return real_func(request, *args, **kwargs) for url in [\"http://localhost\", \"http://localhost/magpie\"]: # paths are", "``is_type`` flag is requested, we know that ``param_compare`` must be a type. Inversely,", "utils.check_val_is_in(mock_calls[\"counter\"], list(range(2, ax.RAISE_RECURSIVE_SAFEGUARD_MAX + 1))) # noqa def test_format_content_json_str_invalid_usage(self): non_json_serializable_content = {\"key\": HTTPInternalServerError()}", ".env when running all tests. 
# Always need to provide a settings container", "*args, **kwargs): if args[1] != \"password\": return real_func(*args, **kwargs) request, args = args[0],", "def test_magpie_prefix_request_with_multiple_route_url(self): \"\"\" Test multiple request routing with fixed \"MAGPIE_URL\" within the API", "utils.check_val_equal(url, \"http://localhost:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"9000\", \"magpie.scheme\": \"https\"})) utils.check_val_equal(url, \"https://localhost:9000\") with mock.patch.dict(os.environ,", "Verifies that incorrect usage of utility is raised accordingly. \"\"\" utils.check_raises(lambda: ax.evaluate_call(int), HTTPInternalServerError,", "the response formatter via 'evaluate_call' itself raising to # trigger 'mock_raise' recursively within", "{\"key\": HTTPInternalServerError()} utils.check_raises( lambda: ax.format_content_json_str(200, \"\", non_json_serializable_content, CONTENT_TYPE_JSON), HTTPInternalServerError, msg=\"invalid content format expected", "is to ensure that values are compared accordingly in a controlled fashion. Therefore,", "# noqa HTTPInternalServerError, msg=\"invalid callable non-lambda 'fallback' should raise\") def test_evaluate_call_recursive_safeguard(self): \"\"\" Validate", "param_compare=1, is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=True, is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, param_compare=\"1\", is_equal=True), HTTPBadRequest)", "the type directly. .. 
versionchanged:: 2.0 Since ``param`` can come from user input,", "base_url + _paths.pop(0), \"Proxied path should have been auto-resolved [URL: {}].\".format(url)) return real_func(request,", "v = ar.get_query_param(resp, \"query\") utils.check_val_equal(v, \"value\") resp = utils.mock_request(\"/some/path?QUERY=VALUE\") v = ar.get_query_param(resp, \"query\")", "with mock.patch.object(constants, \"MAGPIE_URL\", None): with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\"}): url = utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url,", "import generic as ag from magpie.api import requests as ar from magpie.utils import", "it bad request because comparison values are valid) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=1, is_equal=True), HTTPBadRequest)", ") def test_generate_response_http_format_invalid_usage(self): utils.check_raises( lambda: ax.generate_response_http_format(None, {}, {}, \"\", {}), # noqa HTTPInternalServerError,", "ax.verify_param(\"1\", param_compare=None, is_type=True), HTTPInternalServerError) # compare flags expecting param_compare to be some container", "are compared accordingly in a controlled fashion. 
Therefore, error to be raised is", "http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(False, is_true=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(True, is_false=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda:", "**__): ax.evaluate_call(lambda: int(\"x\")) try: app = utils.get_test_magpie_app() with mock.patch(\"magpie.api.exception.generate_response_http_format\", side_effect=mock_raise): with mock.patch(\"magpie.api.login.login.get_session\", side_effect=mock_lambda_call):", "**NOT** raise ``HTTPInternalServerError`` because the whole point of the method is to ensure", "- :func:`test_verify_param_args_incorrect_usage` for invalid input use-cases \"\"\" # compare flags expecting a value", "ax.verify_param(False, is_true=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(True, is_false=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(None, not_none=True, http_error=HTTPForbidden),", "Test multiple request routing with fixed \"MAGPIE_URL\" within the API application. 
Signin with", "Disable constants globals() for every case, since it can pre-loaded from .env when", "# this combination should happen twice, one in signin route and another on", "provided is not utils.check_raises(lambda: ax.verify_param(1, param_compare=\"x\", is_type=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_type=True), HTTPInternalServerError)", "that it does not end up into an endless recursive call stack of", "= utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test.com\", \"magpie.port\": \"1234\"})) utils.check_val_equal(url, \"http://test.com:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"1234\"}))", "DummyEnum.VALUE1.value # copy internal string representation utils.check_val_not_equal(DummyEnum.VALUE1, OtherEnum.VALUE1, msg=\"concrete enum elements should be", "a type while value provided is not utils.check_raises(lambda: ax.verify_param(1, param_compare=\"x\", is_type=True), HTTPInternalServerError) utils.check_raises(lambda:", "not be a type if ``is_type`` is not requested, but other flags require", "only consider it bad request because comparison values are valid) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=1,", "matches=True, param_compare=r\"[A-Z]+\", http_error=HTTPForbidden), HTTPForbidden) def test_verify_param_proper_verifications_passed(self): ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], not_in=True) ax.verify_param(\"b\", param_compare=[\"a\", \"b\"],", "incorrect usage of utility is raised accordingly. \"\"\" utils.check_raises(lambda: ax.evaluate_call(int), HTTPInternalServerError, msg=\"invalid callable", "some form of comparison between values. We evaluate these use cases here. ..", "caught\".format(flag)) def test_verify_param_compare_types(self): \"\"\" Arguments ``param`` and ``param_compare`` must be of same type", "compare parameter must be the type directly. .. 
versionchanged:: 2.0 Since ``param`` can", "and \"ZigguratSignInBadAuth\" use \"get_multiformat_body\". \"\"\" from magpie.api.requests import get_value_multiformat_body_checked as real_multiform_post_checked base_url =", "utils.check_raises(lambda: ax.verify_param(1, is_none=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True,", "# noqa HTTPInternalServerError, msg=\"flag specified with incorrect type should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\",", "safeguard does its job, it should end up raising 'HTTPInternalServerError' directly # (without", "\"\"\" Verifies that incorrect usage of utility is raised accordingly. \"\"\" utils.check_raises(lambda: ax.evaluate_call(int),", "# Since tweens are set up to format all response prior to return,", "def test_format_content_json_str_invalid_usage(self): non_json_serializable_content = {\"key\": HTTPInternalServerError()} utils.check_raises( lambda: ax.format_content_json_str(200, \"\", non_json_serializable_content, CONTENT_TYPE_JSON), HTTPInternalServerError,", "utils.check_val_equal(v, None) resp = utils.mock_request(\"/some/path?other=test\") v = ar.get_query_param(resp, \"value\") utils.check_val_equal(v, None) resp =", "\"is_none\", \"is_empty\", \"is_in\", \"is_equal\", \"is_true\", \"is_false\", \"is_type\", \"matches\"]: utils.check_raises(lambda: ax.verify_param(\"x\", **{flag: 1}), HTTPInternalServerError,", "utils.check_val_equal(where, True) def test_get_magpie_url_defined_or_defaults(self): # Disable constants globals() for every case, since it", "error (because of implementation issue), while it is processing another pre-raised error, that", "side_effect=mock_raise): with mock.patch(\"magpie.api.login.login.get_session\", side_effect=mock_lambda_call): # Call request that ends up calling the response", 
"http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\", is_equal=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(False, is_true=True, http_error=HTTPForbidden), HTTPForbidden)", "import requests as ar from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url from tests", "directly instead of usual TestResponse returned. That error is again re-raised as 'AssertionError'", "settings), # otherwise 'get_constant' can find the current thread settings generated by any", "flag in [\"not_none\", \"not_empty\", \"not_in\", \"not_equal\", \"is_none\", \"is_empty\", \"is_in\", \"is_equal\", \"is_true\", \"is_false\", \"is_type\",", "to be some container instance while value provided is not utils.check_raises(lambda: ax.verify_param(1, param_compare=1,", "test_guess_target_format_default(self): request = utils.mock_request() content_type, where = ag.guess_target_format(request) utils.check_val_equal(content_type, CONTENT_TYPE_JSON) utils.check_val_equal(where, True) def", "test_get_magpie_url_defined_or_defaults(self): # Disable constants globals() for every case, since it can pre-loaded from", "caught\") utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"b\"], not_in=True, http_error=HTTPOk), # noqa HTTPInternalServerError, msg=\"incorrect HTTP class to", "the whole point of the method is to ensure that values are compared", "= \"/version\" resp = utils.test_request(app, \"GET\", path) utils.check_response_basic_info(resp) utils.check_val_equal(resp.request.url, base_url + path, \"Proxied", "up into an endless recursive call stack of raised errors. 
\"\"\" mock_calls =", "tested utils.check_val_is_in(mock_calls[\"counter\"], list(range(2, ax.RAISE_RECURSIVE_SAFEGUARD_MAX + 1))) # noqa def test_format_content_json_str_invalid_usage(self): non_json_serializable_content = {\"key\":", "to # trigger 'mock_raise' recursively within 'raise_http' function. # Since tweens are set", "param_compare=int, is_type=True) ax.verify_param(\"x\", param_compare=six.string_types, is_type=True) ax.verify_param(\"x\", param_compare=str, is_type=True) ax.verify_param(\"x\", param_compare=\"y\", not_equal=True) ax.verify_param(\"x\", param_compare=\"x\",", "\"query\")) utils.check_val_equal(v, False) resp = utils.mock_request(\"/some/path?Query=TRUE\") v = asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v, True) def", "* ax.RAISE_RECURSIVE_SAFEGUARD_MAX: return TypeError() mock_calls[\"counter\"] += 1 raise TypeError() def mock_lambda_call(*_, **__): ax.evaluate_call(lambda:", "= utils.mock_request(\"/some/path?query=value\") v = ar.get_query_param(resp, \"query\") utils.check_val_equal(v, \"value\") resp = utils.mock_request(\"/some/path?QUERY=VALUE\") v =", "mock import six from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPInternalServerError, HTTPOk from pyramid.settings import", "utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], is_in=True), HTTPBadRequest) utils.check_raises(lambda:", "is not requested, but other flags require some form of comparison between values.", "v = asbool(ar.get_query_param(resp, \"query\")) utils.check_val_equal(v, False) resp = utils.mock_request(\"/some/path?Query=TRUE\") v = asbool(ar.get_query_param(resp, \"query\"))", "in signin route and another on the redirected internal login _paths = [\"/signin\",", "must be the type directly. .. 
versionchanged:: 2.0 Since ``param`` can come from", "# trigger 'mock_raise' recursively within 'raise_http' function. # Since tweens are set up", "if args[1] != \"password\": return real_func(*args, **kwargs) request, args = args[0], args[1:] utils.check_val_equal(request.url,", "ax.verify_param(1, param_compare=True, is_type=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=None, is_type=True), HTTPInternalServerError) # compare flags expecting", "\"localhost\"})) utils.check_val_equal(url, \"http://localhost:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test-server.com\"})) utils.check_val_equal(url, \"http://test-server.com:2001\") url = utils.check_no_raise(lambda:", "for flag in [\"not_none\", \"not_empty\", \"not_in\", \"not_equal\", \"is_none\", \"is_empty\", \"is_in\", \"is_equal\", \"is_true\", \"is_false\",", "pre-loaded from .env when running all tests. # Always need to provide a", "require some form of comparison between values. We evaluate these use cases here.", "= utils.mock_request(\"/some/path?QUERY=VALUE\") v = ar.get_query_param(resp, \"query\") utils.check_val_equal(v, \"VALUE\") resp = utils.mock_request(\"/some/path?QUERY=VALUE\") v =", "real_multiform_post_checked base_url = \"http://localhost\" def mock_get_post(real_func, *args, **kwargs): if args[1] != \"password\": return", "compare should raise invalid type\") for flag in [\"not_none\", \"not_empty\", \"not_in\", \"not_equal\", \"is_none\",", "endless loop. utils.test_request(app, \"GET\", \"/session\", expect_errors=True) except AssertionError: # Request called with above", "args[1] != \"password\": return real_func(*args, **kwargs) request, args = args[0], args[1:] utils.check_val_equal(request.url, base_url", "all tests. 
# Always need to provide a settings container (even empty direct", "headers=headers, expect_errors=True) if LooseVersion(self.version) < LooseVersion(\"0.10.0\"): # user name doesn't exist utils.check_response_basic_info(resp, expected_code=406,", "v = ar.get_query_param(resp, \"value\") utils.check_val_equal(v, None) resp = utils.mock_request(\"/some/path?other=test\") v = ar.get_query_param(resp, \"value\",", "error is again re-raised as 'AssertionError' pass except Exception as exc: self.fail(\"unexpected error", "itself # call 'raise_http' again each time operation fails, creating recursive raises. #", "serializable should raise\" ) def test_generate_response_http_format_invalid_usage(self): utils.check_raises( lambda: ax.generate_response_http_format(None, {}, {}, \"\", {}),", "HTTPInternalServerError) # strings cases handled correctly (no raise) utils.check_no_raise(lambda: ax.verify_param(\"1\", param_compare=\"1\", is_equal=True)) def", "a type. Inversely, ``param_compare`` must not be a type if ``is_type`` is not", "Exception as exc: self.fail(\"unexpected error during request creation should not raise: {}\".format(exc)) #", "is_none=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True, http_error=HTTPForbidden), HTTPForbidden)", "end up raising 'HTTPInternalServerError' directly # (without further formatting attempt when reaching the", "ax.verify_param(\"1\", param_compare=\"1\", is_equal=True)) def test_enum_values_listing(self): utils.check_all_equal(DummyEnum.values(), [\"value-1\", \"value-2\"], any_order=True) def test_enum_get_by_value(self): utils.check_val_equal(DummyEnum.get(\"value-1\"), DummyEnum.VALUE1)", "asbool from magpie import __meta__, constants from magpie.api import exception as ax from", "for name in [\"content_type\", \"content-type\", \"Content_Type\", 
\"Content-Type\", \"CONTENT_TYPE\", \"CONTENT-TYPE\"]: for split in [\";,\",", "magpie.api import exception as ax from magpie.api import generic as ag from magpie.api", "utils.check_val_equal(url, \"http://localhost:9871\") # URL fixed with missing scheme even if defined with mock.patch.dict(os.environ,", "= utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"server\"})) utils.check_val_equal(url, \"http://server:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.scheme\": \"https\"})) utils.check_val_equal(url, \"https://localhost:1234\")", "mock_get_post(real_func, *args, **kwargs): if args[1] != \"password\": return real_func(*args, **kwargs) request, args =", "# noqa HTTPInternalServerError, msg=\"incorrect HTTP class to raise error should be caught\") utils.check_raises(lambda:", "return real_func(*args, **kwargs) request, args = args[0], args[1:] utils.check_val_equal(request.url, base_url + _paths.pop(0), \"Proxied", "the other hand, when ``is_type`` flag is requested, we know that ``param_compare`` must", "pre-raised error, that it does not end up into an endless recursive call", "fashion. 
Therefore, error to be raised is an 'expected' validation failure (``HTTPBadRequest`` or", "when ``is_type`` flag is requested, we know that ``param_compare`` must be a type.", "utils.check_val_equal(v, True) resp = utils.mock_request(\"/some/path?value=test\") v = ar.get_query_param(resp, \"value\", True) utils.check_val_equal(v, \"test\") resp", "set up to format all response prior to return, the raised error will", "ax.verify_param(\"x\", param_compare=\"x\", not_equal=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\", is_equal=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(False,", "does its job, it should end up raising 'HTTPInternalServerError' directly # (without further", "= {\"user_name\": \"foo\", \"password\": \"<PASSWORD>\"} headers = {\"Content-Type\": CONTENT_TYPE_JSON, \"Accept\": CONTENT_TYPE_JSON} resp =", "\"ZigguratSignInBadAuth\" use \"get_multiformat_body\". 
\"\"\" from magpie.api.requests import get_value_multiformat_body_checked as real_multiform_post_checked base_url = \"http://localhost\"", "is_in=True), HTTPInternalServerError, msg=\"incorrect non-iterable compare should raise invalid type\") for flag in [\"not_none\",", "utils.check_val_equal(resp.request.url, base_url + path, \"Proxied path should have been auto-resolved [URL: {}].\".format(url)) def", "request = utils.mock_request() content_type, where = ag.guess_target_format(request) utils.check_val_equal(content_type, CONTENT_TYPE_JSON) utils.check_val_equal(where, True) def test_get_magpie_url_defined_or_defaults(self):", "provided is not utils.check_raises(lambda: ax.verify_param(1, param_compare=1, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=list, is_in=True), HTTPInternalServerError)", "Request called with above 'test_request' should catch the final 'HTTPInternalServerError' that is #", "is_none=True) ax.verify_param(\"abc\", not_empty=True) ax.verify_param(\"\", is_empty=True) ax.verify_param(\"abc\", matches=True, param_compare=r\"[a-z]+\") def test_verify_param_args_incorrect_usage(self): \"\"\" Invalid usage", "utils.check_val_equal(url, \"https://localhost:9000\") with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"localhost:9871\"}): url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\")", "\"http://localhost:9871\") # env URL found if not in settings url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\":", "has priority url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"server\"})) utils.check_val_equal(url, \"http://server:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.scheme\":", "utils.check_val_equal(url, \"https://test-server.com\") # ignore port, URL has priority url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": 
\"server\"}))", "def test_magpie_prefix_direct_request(self): base_url = \"http://localhost\" for url in [\"http://localhost\", \"http://localhost/magpie\"]: app = utils.get_test_magpie_app({\"magpie.url\":", "url}) path = \"/version\" resp = utils.test_request(app, \"GET\", path) utils.check_response_basic_info(resp) utils.check_val_equal(resp.request.url, base_url +", "twice, one in signin route and another on the redirected internal login _paths", "of raised errors. \"\"\" mock_calls = {\"counter\": 0} def mock_raise(*_, **__): # avoid", "not end up into an endless recursive call stack of raised errors. \"\"\"", "utils.check_raises(lambda: ax.verify_param(False, is_true=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(True, is_false=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(None, not_none=True,", "representation utils.check_val_not_equal(DummyEnum.VALUE1, OtherEnum.VALUE1, msg=\"concrete enum elements should be different\") def test_evaluate_call_callable_incorrect_usage(self): \"\"\" Verifies", "= {\"Content-Type\": \"{}; charset=UTF-8\".format(CONTENT_TYPE_JSON)} for name in [\"content_type\", \"content-type\", \"Content_Type\", \"Content-Type\", \"CONTENT_TYPE\", \"CONTENT-TYPE\"]:", "priority url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"server\"})) utils.check_val_equal(url, \"http://server:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.scheme\": \"https\"}))", "\"http://localhost:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"9000\", \"magpie.scheme\": \"https\"})) utils.check_val_equal(url, \"https://localhost:9000\") with mock.patch.dict(os.environ, {\"MAGPIE_URL\":", "http_error=HTTPForbidden), HTTPForbidden) def test_verify_param_proper_verifications_passed(self): ax.verify_param(\"x\", param_compare=[\"a\", \"b\"], not_in=True) ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], 
is_in=True) ax.verify_param(1,", "is processing another pre-raised error, that it does not end up into an", "resp = utils.mock_request(\"/some/path?query=value\") v = ar.get_query_param(resp, \"query\") utils.check_val_equal(v, \"value\") resp = utils.mock_request(\"/some/path?QUERY=VALUE\") v", "\"test-server.com\"})) utils.check_val_equal(url, \"http://test-server.com:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"test.com\", \"magpie.port\": \"1234\"})) utils.check_val_equal(url, \"http://test.com:1234\") url", "more than once, use cases did not really get tested utils.check_val_is_in(mock_calls[\"counter\"], list(range(2, ax.RAISE_RECURSIVE_SAFEGUARD_MAX", "to be raised is an 'expected' validation failure (``HTTPBadRequest`` or whichever ``http_error`` provided)", "request because comparison values are valid) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=1, is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\",", "# strings cases handled correctly (no raise) utils.check_no_raise(lambda: ax.verify_param(\"1\", param_compare=\"1\", is_equal=True)) def test_enum_values_listing(self):", "here. .. seealso:: - :func:`test_verify_param_args_incorrect_usage` for invalid input use-cases \"\"\" # compare flags", "expected as JSON serializable should raise\" ) def test_generate_response_http_format_invalid_usage(self): utils.check_raises( lambda: ax.generate_response_http_format(None, {},", "'HTTPInternalServerError' that is # raised directly instead of usual TestResponse returned. That error", "request that ends up calling the response formatter via 'evaluate_call' itself raising to", "a value (can only consider it bad request because comparison values are valid)", "real safeguard fails doing its job if mock_calls[\"counter\"] >= 2 * ax.RAISE_RECURSIVE_SAFEGUARD_MAX: return", "return, the raised error will itself # call 'raise_http' again each time operation", "accordingly. 
\"\"\" utils.check_raises(lambda: ax.evaluate_call(int), HTTPInternalServerError, msg=\"invalid callable non-lambda 'call' should raise\") utils.check_raises(lambda: ax.evaluate_call(lambda:", "@runner.MAGPIE_TEST_UTILS class TestUtils(unittest.TestCase): @classmethod def setUpClass(cls): cls.version = __meta__.__version__ # only local test", "higher than the MAX (i.e.: 2*MAX from mock), the safeguard did not do", "utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\"), HTTPBadRequest) # with requested error", "job, it should end up raising 'HTTPInternalServerError' directly # (without further formatting attempt", "is_none=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True,", "different\") def test_evaluate_call_callable_incorrect_usage(self): \"\"\" Verifies that incorrect usage of utility is raised accordingly.", "http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\", http_error=HTTPForbidden), HTTPForbidden)", "\";\"), [\";\", \",\"]]: utils.check_val_equal(get_header(name, headers, split=split), CONTENT_TYPE_JSON) def test_get_query_param(self): resp = utils.mock_request(\"/some/path\") v", "ax.verify_param(\"1\", param_compare=str, is_equal=True), HTTPInternalServerError) # compare flags expecting param_compare to be a type", "HTTPForbidden) utils.check_raises(lambda: ax.verify_param(False, is_true=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(True, is_false=True, http_error=HTTPForbidden), HTTPForbidden) 
utils.check_raises(lambda: ax.verify_param(None,", "it did not get called at least more than once, use cases did", "again each time operation fails, creating recursive raises. # If recursive safeguard does", "fixed \"MAGPIE_URL\" within the API application. Signin with invalid credentials will call \"/signin\"", "called at least more than once, use cases did not really get tested", "Always need to provide a settings container (even empty direct when nothing define", "it should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"b\"], not_in=True, http_error=HTTPOk), # noqa HTTPInternalServerError, msg=\"incorrect", "itself raising to # trigger 'mock_raise' recursively within 'raise_http' function. # Since tweens", "< LooseVersion(\"0.10.0\"): # user name doesn't exist utils.check_response_basic_info(resp, expected_code=406, expected_method=\"POST\") else: # invalid", "\"\"\" utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"]), HTTPInternalServerError, msg=\"missing any flag specification should be caught\")", "a type if ``is_type`` is not requested, but other flags require some form", "param_compare to be a type while value provided is not utils.check_raises(lambda: ax.verify_param(1, param_compare=\"x\",", "is raised accordingly. 
\"\"\" utils.check_raises(lambda: ax.evaluate_call(int), HTTPInternalServerError, msg=\"invalid callable non-lambda 'call' should raise\")", "\"not_in\", \"not_equal\", \"is_none\", \"is_empty\", \"is_in\", \"is_equal\", \"is_true\", \"is_false\", \"is_type\", \"matches\"]: utils.check_raises(lambda: ax.verify_param(\"x\", **{flag:", "its job if mock_calls[\"counter\"] >= 2 * ax.RAISE_RECURSIVE_SAFEGUARD_MAX: return TypeError() mock_calls[\"counter\"] += 1", "where = ag.guess_target_format(request) utils.check_val_equal(content_type, CONTENT_TYPE_JSON) utils.check_val_equal(where, True) def test_get_magpie_url_defined_or_defaults(self): # Disable constants globals()", "should be caught\") utils.check_raises(lambda: ax.verify_param([1], param_compare=1, is_in=True), HTTPInternalServerError, msg=\"incorrect non-iterable compare should raise", "import mock import six from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPInternalServerError, HTTPOk from pyramid.settings", "from user input, we should **NOT** raise ``HTTPInternalServerError`` because the whole point of", "Call request that ends up calling the response formatter via 'evaluate_call' itself raising", "with above 'test_request' should catch the final 'HTTPInternalServerError' that is # raised directly", "to raise error should be caught\") utils.check_raises(lambda: ax.verify_param([1], param_compare=1, is_in=True), HTTPInternalServerError, msg=\"incorrect non-iterable", "= utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:9871\") # env URL found if not in settings", "runtime 'unexpected' processing error. 
On the other hand, when ``is_type`` flag is requested,", "path) utils.check_response_basic_info(resp) utils.check_val_equal(resp.request.url, base_url + path, \"Proxied path should have been auto-resolved [URL:", "resulting HTTP response raises itself an error (because of implementation issue), while it", "not utils.check_raises(lambda: ax.verify_param(1, param_compare=1, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=list, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\",", "[\";,\", \",;\", \";\", (\",\", \";\"), [\";\", \",\"]]: utils.check_val_equal(get_header(name, headers, split=split), CONTENT_TYPE_JSON) def test_get_query_param(self):", "\"\", {}), # noqa HTTPInternalServerError, msg=\"invalid arguments resulting in error during response generation", "utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"localhost\"})) utils.check_val_equal(url, \"http://localhost:2001\") url", "param_compare=int, is_type=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1.0, param_compare=six.string_types, is_type=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\", not_equal=True), HTTPBadRequest)", "utils.check_response_basic_info(resp, expected_code=401, expected_method=\"POST\") def test_get_header_split(self): headers = {\"Content-Type\": \"{}; charset=UTF-8\".format(CONTENT_TYPE_JSON)} for name in", "**__): # avoid raising forever if the real safeguard fails doing its job", "-*- \"\"\" test_utils ---------------------------------- Tests for the various utility operations employed by Magpie.", "\"Content-Type\", \"CONTENT_TYPE\", \"CONTENT-TYPE\"]: for split in [\";,\", \",;\", \";\", (\",\", \";\"), [\";\", \",\"]]:", "utils.check_raises(lambda: 
ax.verify_param(\"1\", param_compare=int, is_type=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\", not_equal=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda:", "and another on the redirected internal login _paths = [\"/signin\", \"/signin_internal\"] app =", "not_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, not_in=True), HTTPInternalServerError) # strings cases handled correctly (no", "HTTPOk from pyramid.settings import asbool from magpie import __meta__, constants from magpie.api import", "HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\", is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(False, is_true=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(True, is_false=True),", "ax.verify_param(\"x\", param_compare=\"y\", not_equal=True) ax.verify_param(\"x\", param_compare=\"x\", is_equal=True) ax.verify_param(True, is_true=True) ax.verify_param(False, is_false=True) ax.verify_param(1, not_none=True) ax.verify_param(None,", "HTTP class to raise error should be caught\") utils.check_raises(lambda: ax.verify_param([1], param_compare=1, is_in=True), HTTPInternalServerError,", "param_compare=True, is_equal=True), HTTPBadRequest) # when compare flags expect a value but type is", "utils.check_raises(lambda: ax.verify_param(1, param_compare=list, not_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, not_in=True), HTTPInternalServerError) # strings cases", "get_magpie_url({\"magpie.port\": \"9000\", \"magpie.scheme\": \"https\"})) utils.check_val_equal(url, \"https://localhost:9000\") with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"localhost:9871\"}): url = utils.check_no_raise(lambda:", "'expected' validation failure (``HTTPBadRequest`` or whichever ``http_error`` provided) instead of 
runtime 'unexpected' processing", "utils.check_raises(lambda: ax.verify_param(True, is_false=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(None, not_none=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(1, is_none=True,", "stopping the endless loop. utils.test_request(app, \"GET\", \"/session\", expect_errors=True) except AssertionError: # Request called", "should raise\") def test_evaluate_call_recursive_safeguard(self): \"\"\" Validate use case if internal function that handles", "defined with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\", \"MAGPIE_PORT\": \"1234\"}): url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url,", "ax.verify_param(1, param_compare=list, not_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, not_in=True), HTTPInternalServerError) # strings cases handled", "Signin with invalid credentials will call \"/signin\" followed by sub-request \"/signin_internal\" and finally", "ax.verify_param(1, is_none=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"\", not_empty=True, http_error=HTTPForbidden), HTTPForbidden) utils.check_raises(lambda: ax.verify_param(\"abc\", is_empty=True, http_error=HTTPForbidden),", "raise\") def test_evaluate_call_recursive_safeguard(self): \"\"\" Validate use case if internal function that handles formatting", "be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=None), # noqa HTTPInternalServerError, msg=\"flag specified with", "_paths[0], json=data, headers=headers, expect_errors=True) if LooseVersion(self.version) < LooseVersion(\"0.10.0\"): # user name doesn't exist", "utils.check_raises(lambda: ax.verify_param(1, param_compare=\"x\", is_type=True), HTTPInternalServerError) utils.check_raises(lambda: 
ax.verify_param(1, param_compare=True, is_type=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=None,", "do its job # if it did not get called at least more", "utils.mock_request(\"/some/path?query=value\") v = ar.get_query_param(resp, \"query\") utils.check_val_equal(v, \"value\") resp = utils.mock_request(\"/some/path?QUERY=VALUE\") v = ar.get_query_param(resp,", "app with mock.patch.object(constants, \"MAGPIE_URL\", None): with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\"}): url = utils.check_no_raise(lambda: get_magpie_url({}))", "= ar.get_query_param(resp, \"value\") utils.check_val_equal(v, None) resp = utils.mock_request(\"/some/path?other=test\") v = ar.get_query_param(resp, \"value\") utils.check_val_equal(v,", "(\",\", \";\"), [\";\", \",\"]]: utils.check_val_equal(get_header(name, headers, split=split), CONTENT_TYPE_JSON) def test_get_query_param(self): resp = utils.mock_request(\"/some/path\")", "mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\"}): url = utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:2001\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\":", "On the other hand, when ``is_type`` flag is requested, we know that ``param_compare``", "creating recursive raises. 
# If recursive safeguard does its job, it should end", "safeguard did not do its job # if it did not get called", "AssertionError: # Request called with above 'test_request' should catch the final 'HTTPInternalServerError' that", "need to provide a settings container (even empty direct when nothing define in", "lambda: ax.format_content_json_str(200, \"\", non_json_serializable_content, CONTENT_TYPE_JSON), HTTPInternalServerError, msg=\"invalid content format expected as JSON serializable", "ax.verify_param(True, is_false=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(None, not_none=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, is_none=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"\",", "on the redirected internal login _paths = [\"/signin\", \"/signin_internal\"] app = utils.get_test_magpie_app({\"magpie.url\": url})", "controlled fashion. Therefore, error to be raised is an 'expected' validation failure (``HTTPBadRequest``", "if not in settings url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"server\"})) # ignored, URL priority", "= {\"Content-Type\": CONTENT_TYPE_JSON, \"Accept\": CONTENT_TYPE_JSON} resp = utils.test_request(app, \"POST\", _paths[0], json=data, headers=headers, expect_errors=True)", "def test_get_magpie_url_defined_or_defaults(self): # Disable constants globals() for every case, since it can pre-loaded", "\"9000\", \"magpie.scheme\": \"https\"})) utils.check_val_equal(url, \"https://localhost:9000\") with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"localhost:9871\"}): url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\":", "ax.verify_param(1, param_compare=\"1\", is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_equal=True), HTTPBadRequest) # when compare flags", "flag specification should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=None), # 
noqa HTTPInternalServerError,", "ax.RAISE_RECURSIVE_SAFEGUARD_MAX: return TypeError() mock_calls[\"counter\"] += 1 raise TypeError() def mock_lambda_call(*_, **__): ax.evaluate_call(lambda: int(\"x\"))", "LooseVersion(\"0.10.0\"): # user name doesn't exist utils.check_response_basic_info(resp, expected_code=406, expected_method=\"POST\") else: # invalid username/password", "will call \"/signin\" followed by sub-request \"/signin_internal\" and finally \"ZigguratSignInBadAuth\". Both \"/signin\" and", "counter reached higher than the MAX (i.e.: 2*MAX from mock), the safeguard did", "class OtherEnum(ExtendedEnum): VALUE1 = DummyEnum.VALUE1.value # copy internal string representation utils.check_val_not_equal(DummyEnum.VALUE1, OtherEnum.VALUE1, msg=\"concrete", "base_url = \"http://localhost\" for url in [\"http://localhost\", \"http://localhost/magpie\"]: app = utils.get_test_magpie_app({\"magpie.url\": url}) path", "get_magpie_url({\"magpie.port\": \"1234\"})) utils.check_val_equal(url, \"http://localhost:1234\") url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.port\": \"9000\", \"magpie.scheme\": \"https\"})) utils.check_val_equal(url, \"https://localhost:9000\")", "\"is_empty\", \"is_in\", \"is_equal\", \"is_true\", \"is_false\", \"is_type\", \"matches\"]: utils.check_raises(lambda: ax.verify_param(\"x\", **{flag: 1}), HTTPInternalServerError, msg=\"invalid", "\"/signin\" and \"ZigguratSignInBadAuth\" use \"get_multiformat_body\". 
\"\"\" from magpie.api.requests import get_value_multiformat_body_checked as real_multiform_post_checked base_url", "in settings url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"server\"})) # ignored, URL priority utils.check_val_equal(url, \"http://localhost:9871\")", "HTTPInternalServerError, msg=\"missing 'param_compare' for flag needing it should be caught\") utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"b\"],", "is_equal=True), HTTPBadRequest) # when compare flags expect a value but type is provided,", "TypeError() mock_calls[\"counter\"] += 1 raise TypeError() def mock_lambda_call(*_, **__): ax.evaluate_call(lambda: int(\"x\")) try: app", "None) resp = utils.mock_request(\"/some/path?other=test\") v = ar.get_query_param(resp, \"value\", True) utils.check_val_equal(v, True) resp =", "return TypeError() mock_calls[\"counter\"] += 1 raise TypeError() def mock_lambda_call(*_, **__): ax.evaluate_call(lambda: int(\"x\")) try:", "in mock) each time a post to get the 'password' is called in", "not_none=True) ax.verify_param(None, is_none=True) ax.verify_param(\"abc\", not_empty=True) ax.verify_param(\"\", is_empty=True) ax.verify_param(\"abc\", matches=True, param_compare=r\"[a-z]+\") def test_verify_param_args_incorrect_usage(self): \"\"\"", "\"Proxied path should have been auto-resolved [URL: {}].\".format(url)) return real_func(request, *args, **kwargs) for", "# with default error utils.check_raises(lambda: ax.verify_param(\"b\", param_compare=[\"a\", \"b\"], not_in=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=[\"a\",", "is_false=True) ax.verify_param(1, not_none=True) ax.verify_param(None, is_none=True) ax.verify_param(\"abc\", not_empty=True) ax.verify_param(\"\", is_empty=True) ax.verify_param(\"abc\", matches=True, param_compare=r\"[a-z]+\") def", "comparison, except for ``is_type`` where compare parameter must be the type directly. 
..", "flags require some form of comparison between values. We evaluate these use cases", "been auto-resolved [URL: {}].\".format(url)) def test_magpie_prefix_request_with_multiple_route_url(self): \"\"\" Test multiple request routing with fixed", "input use-cases \"\"\" # compare flags expecting a value (can only consider it", "flags expecting param_compare to be some container instance while value provided is not", "url in [\"http://localhost\", \"http://localhost/magpie\"]: # paths are reduced (pop in mock) each time", "reduced (pop in mock) each time a post to get the 'password' is", "cases here. .. seealso:: - :func:`test_verify_param_args_incorrect_usage` for invalid input use-cases \"\"\" # compare", "is_empty=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"abc\", matches=True, param_compare=r\"[A-Z]+\"), HTTPBadRequest) # with requested error utils.check_raises(lambda: ax.verify_param(\"b\",", "to return, the raised error will itself # call 'raise_http' again each time", "name doesn't exist utils.check_response_basic_info(resp, expected_code=406, expected_method=\"POST\") else: # invalid username/password credentials utils.check_response_basic_info(resp, expected_code=401,", "is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, param_compare=\"1\", is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_equal=True), HTTPBadRequest) #", "expecting param_compare to be a type while value provided is not utils.check_raises(lambda: ax.verify_param(1,", "\"<PASSWORD>\"} headers = {\"Content-Type\": CONTENT_TYPE_JSON, \"Accept\": CONTENT_TYPE_JSON} resp = utils.test_request(app, \"POST\", _paths[0], json=data,", "HTTPInternalServerError, msg=\"invalid flag '{}' type should be caught\".format(flag)) def test_verify_param_compare_types(self): \"\"\" Arguments ``param``", "did not get called at least more than once, use cases did not", "_paths.pop(0), \"Proxied path should have 
been auto-resolved [URL: {}].\".format(url)) return real_func(request, *args, **kwargs)", "any test app with mock.patch.object(constants, \"MAGPIE_URL\", None): with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\"}): url =", "case if internal function that handles formatting and generation of a resulting HTTP", "noqa HTTPInternalServerError, msg=\"invalid callable non-lambda 'fallback' should raise\") def test_evaluate_call_recursive_safeguard(self): \"\"\" Validate use", "\"{}; charset=UTF-8\".format(CONTENT_TYPE_JSON)} for name in [\"content_type\", \"content-type\", \"Content_Type\", \"Content-Type\", \"CONTENT_TYPE\", \"CONTENT-TYPE\"]: for split", "utils.mock_request(\"/some/path?value=test\") v = ar.get_query_param(resp, \"value\", True) utils.check_val_equal(v, \"test\") resp = utils.mock_request(\"/some/path?query=value\") v =", "can come from user input, we should **NOT** raise ``HTTPInternalServerError`` because the whole", "raise ``HTTPInternalServerError`` because the whole point of the method is to ensure that", "are reduced (pop in mock) each time a post to get the 'password'", "is not utils.check_raises(lambda: ax.verify_param(1, param_compare=1, is_in=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1, param_compare=list, is_in=True), HTTPInternalServerError) utils.check_raises(lambda:", "None) resp = utils.mock_request(\"/some/path?other=test\") v = ar.get_query_param(resp, \"value\") utils.check_val_equal(v, None) resp = utils.mock_request(\"/some/path?other=test\")", "in 'login' module # this combination should happen twice, one in signin route", "param_compare=\"x\", is_equal=True) ax.verify_param(True, is_true=True) ax.verify_param(False, is_false=True) ax.verify_param(1, not_none=True) ax.verify_param(None, is_none=True) ax.verify_param(\"abc\", not_empty=True) ax.verify_param(\"\",", "utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=str, not_in=True), HTTPInternalServerError) # strings cases handled 
correctly (no raise) utils.check_no_raise(lambda:", "\"Accept\": CONTENT_TYPE_JSON} resp = utils.test_request(app, \"POST\", _paths[0], json=data, headers=headers, expect_errors=True) if LooseVersion(self.version) <", "are valid) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=1, is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"1\", param_compare=True, is_equal=True), HTTPBadRequest) utils.check_raises(lambda:", "flags expect a value but type is provided, should still detect incorrect input", "def test_get_header_split(self): headers = {\"Content-Type\": \"{}; charset=UTF-8\".format(CONTENT_TYPE_JSON)} for name in [\"content_type\", \"content-type\", \"Content_Type\",", "utils.check_raises(lambda: ax.verify_param(1.0, param_compare=six.string_types, is_type=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"x\", not_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(\"x\", param_compare=\"y\",", "port, URL has priority url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.host\": \"server\"})) utils.check_val_equal(url, \"http://server:1234\") url =", "be different\") def test_evaluate_call_callable_incorrect_usage(self): \"\"\" Verifies that incorrect usage of utility is raised", "HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, param_compare=\"1\", is_equal=True), HTTPBadRequest) utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_equal=True), HTTPBadRequest) # when", "type while value provided is not utils.check_raises(lambda: ax.verify_param(1, param_compare=\"x\", is_type=True), HTTPInternalServerError) utils.check_raises(lambda: ax.verify_param(1,", "raise: {}\".format(exc)) # if our counter reached higher than the MAX (i.e.: 2*MAX", "utils.check_no_raise(lambda: get_magpie_url({})) utils.check_val_equal(url, \"http://localhost:9871\") # env URL found if not in settings url", "the redirected internal login _paths = 
[\"/signin\", \"/signin_internal\"] app = utils.get_test_magpie_app({\"magpie.url\": url}) with", "test_enum_other(self): class OtherEnum(ExtendedEnum): VALUE1 = DummyEnum.VALUE1.value # copy internal string representation utils.check_val_not_equal(DummyEnum.VALUE1, OtherEnum.VALUE1,", "should raise\" ) def test_guess_target_format_default(self): request = utils.mock_request() content_type, where = ag.guess_target_format(request) utils.check_val_equal(content_type,", "of function raises internal server error instead of 'normal HTTP error'. \"\"\" utils.check_raises(lambda:", "= utils.get_test_magpie_app({\"magpie.url\": url}) path = \"/version\" resp = utils.test_request(app, \"GET\", path) utils.check_response_basic_info(resp) utils.check_val_equal(resp.request.url,", "``param`` can come from user input, we should **NOT** raise ``HTTPInternalServerError`` because the", "by any test app with mock.patch.object(constants, \"MAGPIE_URL\", None): with mock.patch.dict(os.environ, {\"MAGPIE_URL\": \"\"}): url", "from pyramid.settings import asbool from magpie import __meta__, constants from magpie.api import exception", "\"1234\"}): url = utils.check_no_raise(lambda: get_magpie_url({\"magpie.url\": \"https://test-server.com\"})) utils.check_val_equal(url, \"https://test-server.com\") # ignore port, URL has", "time operation fails, creating recursive raises. # If recursive safeguard does its job," ]
[ "invTypes = Table('invTypes', metadata) with open( os.path.join(source_path, 'invVolumes1.csv'), 'r' ) as groupVolumes: volumereader", "Table('invTypes', metadata) with open( os.path.join(source_path, 'invVolumes1.csv'), 'r' ) as groupVolumes: volumereader = csv.reader(groupVolumes,", "volumereader = csv.reader(groupVolumes, delimiter=',') for group in volumereader: connection.execute( invVolumes.insert().from_select(['typeID','volume'], select([invTypes.c.typeID,literal_column(group[0])]).where(invTypes.c.groupID == literal_column(group[1])))", "group in volumereader: connection.execute( invVolumes.insert().from_select(['typeID','volume'], select([invTypes.c.typeID,literal_column(group[0])]).where(invTypes.c.groupID == literal_column(group[1]))) ) with open(os.path.join(source_path, 'invVolumes2.csv'), 'r')", "csv from sqlalchemy import Table,literal_column,select def importVolumes(connection, metadata, source_path): invVolumes = Table('invVolumes', metadata)", "= Table('invTypes', metadata) with open( os.path.join(source_path, 'invVolumes1.csv'), 'r' ) as groupVolumes: volumereader =", "= Table('invVolumes', metadata) invTypes = Table('invTypes', metadata) with open( os.path.join(source_path, 'invVolumes1.csv'), 'r' )", "with open( os.path.join(source_path, 'invVolumes1.csv'), 'r' ) as groupVolumes: volumereader = csv.reader(groupVolumes, delimiter=',') for", "connection.execute( invVolumes.insert().from_select(['typeID','volume'], select([invTypes.c.typeID,literal_column(group[0])]).where(invTypes.c.groupID == literal_column(group[1]))) ) with open(os.path.join(source_path, 'invVolumes2.csv'), 'r') as groupVolumes: volumereader", ") with open(os.path.join(source_path, 'invVolumes2.csv'), 'r') as groupVolumes: volumereader = csv.reader(groupVolumes, delimiter=',') for group", "== literal_column(group[1]))) ) with open(os.path.join(source_path, 'invVolumes2.csv'), 'r') as groupVolumes: volumereader = csv.reader(groupVolumes, delimiter=',')", "from sqlalchemy 
import Table,literal_column,select def importVolumes(connection, metadata, source_path): invVolumes = Table('invVolumes', metadata) invTypes", "in volumereader: connection.execute( invVolumes.insert().from_select(['typeID','volume'], select([invTypes.c.typeID,literal_column(group[0])]).where(invTypes.c.groupID == literal_column(group[1]))) ) with open(os.path.join(source_path, 'invVolumes2.csv'), 'r') as", "volumereader = csv.reader(groupVolumes, delimiter=',') for group in volumereader: connection.execute( invVolumes.insert(), typeID=group[1], volume=group[0] )", "Table,literal_column,select def importVolumes(connection, metadata, source_path): invVolumes = Table('invVolumes', metadata) invTypes = Table('invTypes', metadata)", "Table('invVolumes', metadata) invTypes = Table('invTypes', metadata) with open( os.path.join(source_path, 'invVolumes1.csv'), 'r' ) as", "metadata) with open( os.path.join(source_path, 'invVolumes1.csv'), 'r' ) as groupVolumes: volumereader = csv.reader(groupVolumes, delimiter=',')", "= csv.reader(groupVolumes, delimiter=',') for group in volumereader: connection.execute( invVolumes.insert().from_select(['typeID','volume'], select([invTypes.c.typeID,literal_column(group[0])]).where(invTypes.c.groupID == literal_column(group[1]))) )", "source_path): invVolumes = Table('invVolumes', metadata) invTypes = Table('invTypes', metadata) with open( os.path.join(source_path, 'invVolumes1.csv'),", "importVolumes(connection, metadata, source_path): invVolumes = Table('invVolumes', metadata) invTypes = Table('invTypes', metadata) with open(", "os import csv from sqlalchemy import Table,literal_column,select def importVolumes(connection, metadata, source_path): invVolumes =", "import os import csv from sqlalchemy import Table,literal_column,select def importVolumes(connection, metadata, source_path): invVolumes", "metadata) invTypes = Table('invTypes', metadata) with open( os.path.join(source_path, 'invVolumes1.csv'), 'r' ) as groupVolumes:", 
"invVolumes.insert().from_select(['typeID','volume'], select([invTypes.c.typeID,literal_column(group[0])]).where(invTypes.c.groupID == literal_column(group[1]))) ) with open(os.path.join(source_path, 'invVolumes2.csv'), 'r') as groupVolumes: volumereader =", "for group in volumereader: connection.execute( invVolumes.insert().from_select(['typeID','volume'], select([invTypes.c.typeID,literal_column(group[0])]).where(invTypes.c.groupID == literal_column(group[1]))) ) with open(os.path.join(source_path, 'invVolumes2.csv'),", "volumereader: connection.execute( invVolumes.insert().from_select(['typeID','volume'], select([invTypes.c.typeID,literal_column(group[0])]).where(invTypes.c.groupID == literal_column(group[1]))) ) with open(os.path.join(source_path, 'invVolumes2.csv'), 'r') as groupVolumes:", "'r' ) as groupVolumes: volumereader = csv.reader(groupVolumes, delimiter=',') for group in volumereader: connection.execute(", "groupVolumes: volumereader = csv.reader(groupVolumes, delimiter=',') for group in volumereader: connection.execute( invVolumes.insert(), typeID=group[1], volume=group[0]", "as groupVolumes: volumereader = csv.reader(groupVolumes, delimiter=',') for group in volumereader: connection.execute( invVolumes.insert().from_select(['typeID','volume'], select([invTypes.c.typeID,literal_column(group[0])]).where(invTypes.c.groupID", "delimiter=',') for group in volumereader: connection.execute( invVolumes.insert().from_select(['typeID','volume'], select([invTypes.c.typeID,literal_column(group[0])]).where(invTypes.c.groupID == literal_column(group[1]))) ) with open(os.path.join(source_path,", "'invVolumes1.csv'), 'r' ) as groupVolumes: volumereader = csv.reader(groupVolumes, delimiter=',') for group in volumereader:", "with open(os.path.join(source_path, 'invVolumes2.csv'), 'r') as groupVolumes: volumereader = csv.reader(groupVolumes, delimiter=',') for group in", "import Table,literal_column,select def importVolumes(connection, metadata, source_path): 
invVolumes = Table('invVolumes', metadata) invTypes = Table('invTypes',", "os.path.join(source_path, 'invVolumes1.csv'), 'r' ) as groupVolumes: volumereader = csv.reader(groupVolumes, delimiter=',') for group in", "select([invTypes.c.typeID,literal_column(group[0])]).where(invTypes.c.groupID == literal_column(group[1]))) ) with open(os.path.join(source_path, 'invVolumes2.csv'), 'r') as groupVolumes: volumereader = csv.reader(groupVolumes,", "sqlalchemy import Table,literal_column,select def importVolumes(connection, metadata, source_path): invVolumes = Table('invVolumes', metadata) invTypes =", "metadata, source_path): invVolumes = Table('invVolumes', metadata) invTypes = Table('invTypes', metadata) with open( os.path.join(source_path,", "csv.reader(groupVolumes, delimiter=',') for group in volumereader: connection.execute( invVolumes.insert().from_select(['typeID','volume'], select([invTypes.c.typeID,literal_column(group[0])]).where(invTypes.c.groupID == literal_column(group[1]))) ) with", "'invVolumes2.csv'), 'r') as groupVolumes: volumereader = csv.reader(groupVolumes, delimiter=',') for group in volumereader: connection.execute(", "as groupVolumes: volumereader = csv.reader(groupVolumes, delimiter=',') for group in volumereader: connection.execute( invVolumes.insert(), typeID=group[1],", ") as groupVolumes: volumereader = csv.reader(groupVolumes, delimiter=',') for group in volumereader: connection.execute( invVolumes.insert().from_select(['typeID','volume'],", "def importVolumes(connection, metadata, source_path): invVolumes = Table('invVolumes', metadata) invTypes = Table('invTypes', metadata) with", "groupVolumes: volumereader = csv.reader(groupVolumes, delimiter=',') for group in volumereader: connection.execute( invVolumes.insert().from_select(['typeID','volume'], select([invTypes.c.typeID,literal_column(group[0])]).where(invTypes.c.groupID ==", "import csv from sqlalchemy import Table,literal_column,select def importVolumes(connection, metadata, 
source_path): invVolumes = Table('invVolumes',", "open( os.path.join(source_path, 'invVolumes1.csv'), 'r' ) as groupVolumes: volumereader = csv.reader(groupVolumes, delimiter=',') for group", "literal_column(group[1]))) ) with open(os.path.join(source_path, 'invVolumes2.csv'), 'r') as groupVolumes: volumereader = csv.reader(groupVolumes, delimiter=',') for", "invVolumes = Table('invVolumes', metadata) invTypes = Table('invTypes', metadata) with open( os.path.join(source_path, 'invVolumes1.csv'), 'r'", "open(os.path.join(source_path, 'invVolumes2.csv'), 'r') as groupVolumes: volumereader = csv.reader(groupVolumes, delimiter=',') for group in volumereader:", "'r') as groupVolumes: volumereader = csv.reader(groupVolumes, delimiter=',') for group in volumereader: connection.execute( invVolumes.insert()," ]
[ "def get_or_create_hashtag(hashtagblob): ht, created = Hashtag.objects.get_or_create(text=hashtagblob['text']) return ht def get_or_create_url(urlblob): urlparse_results = urlparse(urlblob['expanded_url'])", "if entities.get('hashtags') and len(entities['hashtags']) > 0: tweet.contains_hashtag = True for hashtag in entities['hashtags']:", "q['source'] article, created = Article.objects.get_or_create(title=source['title'], defaults={'authors': source['authors'], 'year': source['year'], 'venue': source['venue'], 'link': source['link']})", "url def get_or_create_media(mediablob): media, created = Media.objects.get_or_create(type=mediablob['type'], media_url=mediablob['media_url']) return media def handle_reply_to(status_id, user_id,", "else: tweet.type = get_or_create_messagetype('tweet') if tweet_data.get('entities'): handle_entities(tweet, tweet_data.get('entities'), dataset_obj) # sentiment set_message_sentiment(tweet, save=False)", "domain = urlparse_results.netloc url, created = Url.objects.get_or_create(full_url=urlblob['expanded_url'], domain=domain, short_url=urlblob['url']) return url def get_or_create_media(mediablob):", "= json.loads(json_str) if tweet_data.get('lang'): lang = tweet_data.get('lang') if lang != \"en\": return False", "import Article, Question from msgvis.apps.corpus.models import * from msgvis.apps.enhance.models import set_message_sentiment def create_an_user_from_json_obj(user_data,", "get_or_create_hashtag(hashtagblob): ht, created = Hashtag.objects.get_or_create(text=hashtagblob['text']) return ht def get_or_create_url(urlblob): urlparse_results = urlparse(urlblob['expanded_url']) domain", "in tweet_data: return None # if tweet_data.get('lang') != 'en': # return None tweet,", "get_or_create_a_tweet_from_json_obj(retweeted_status, dataset_obj) if original_tweet is not None: original_tweet.shared_count += 1 original_tweet.save() original_tweet.sender.shared_count +=", "if original_tweet is not None: original_tweet.shared_count += 1 
original_tweet.save() original_tweet.sender.shared_count += 1 original_tweet.sender.save()", "not None: original_tweet.replied_to_count += 1 original_tweet.save() original_tweet.sender.replied_to_count += 1 original_tweet.sender.save() def handle_retweet(retweeted_status, dataset_obj):", "for q in questions: source = q['source'] article, created = Article.objects.get_or_create(title=source['title'], defaults={'authors': source['authors'],", "msgvis.apps.enhance.models import set_message_sentiment def create_an_user_from_json_obj(user_data, dataset_obj): sender, created = Person.objects.get_or_create(dataset=dataset_obj, original_id=user_data['id']) if user_data.get('screen_name'):", "import IntegrityError from django.utils.timezone import utc from urlparse import urlparse import json from", "original_tweet is not None: original_tweet.shared_count += 1 original_tweet.save() original_tweet.sender.shared_count += 1 original_tweet.sender.save() def", "Given a dataset object, imports a tweet from json object into the dataset.", "if lang != \"en\": return False return get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj) def get_or_create_language(code): lang, created", "is not None: tweet.type = get_or_create_messagetype(\"reply\") handle_reply_to(status_id=tweet_data['in_reply_to_status_id'], user_id=tweet_data['in_reply_to_user_id'], screen_name=tweet_data['in_reply_to_screen_name'], dataset_obj=dataset_obj) else: tweet.type =", "user_data.get('screen_name'): sender.username = user_data['screen_name'] if user_data.get('name'): sender.full_name = user_data['name'] if user_data.get('lang'): sender.language =", "the dataset. 
\"\"\" tweet_data = json.loads(json_str) if tweet_data.get('lang'): lang = tweet_data.get('lang') if lang", "1 original_tweet.save() original_tweet.sender.shared_count += 1 original_tweet.sender.save() def handle_entities(tweet, entities, dataset_obj): # hashtags if", "me in entities['media']: tweet.media.add(get_or_create_media(me)) # user_mentions if entities.get('user_mentions') and len(entities['user_mentions']) > 0: tweet.contains_mention", "sender.save() return sender def create_an_instance_from_json(json_str, dataset_obj): \"\"\" Given a dataset object, imports a", "import set_message_sentiment def create_an_user_from_json_obj(user_data, dataset_obj): sender, created = Person.objects.get_or_create(dataset=dataset_obj, original_id=user_data['id']) if user_data.get('screen_name'): sender.username", "get_or_create_language(code): lang, created = Language.objects.get_or_create(code=code) return lang def get_or_create_timezone(name): zone, created = Timezone.objects.get_or_create(name=name)", "= get_or_create_a_tweet_from_json_obj(tmp_tweet, dataset_obj) if original_tweet is not None: original_tweet.replied_to_count += 1 original_tweet.save() original_tweet.sender.replied_to_count", "mtype def get_or_create_hashtag(hashtagblob): ht, created = Hashtag.objects.get_or_create(text=hashtagblob['text']) return ht def get_or_create_url(urlblob): urlparse_results =", "1 original_tweet.sender.save() def handle_retweet(retweeted_status, dataset_obj): # update original tweet shared_count original_tweet = get_or_create_a_tweet_from_json_obj(retweeted_status,", "in entities['media']: tweet.media.add(get_or_create_media(me)) # user_mentions if entities.get('user_mentions') and len(entities['user_mentions']) > 0: tweet.contains_mention =", "media_url=mediablob['media_url']) return media def handle_reply_to(status_id, user_id, screen_name, dataset_obj): # update original tweet shared_count", "create_an_instance_from_json(json_str, dataset_obj): \"\"\" Given a dataset object, 
imports a tweet from json string", "tweet.contains_mention = True for mention in entities['user_mentions']: mention_obj = create_an_user_from_json_obj(mention, dataset_obj) mention_obj.mentioned_count +=", "utc from urlparse import urlparse import json from datetime import datetime from email.utils", "tweet shared_count original_tweet = get_or_create_a_tweet_from_json_obj(retweeted_status, dataset_obj) if original_tweet is not None: original_tweet.shared_count +=", "len(entities['media']) > 0: tweet.contains_media = True for me in entities['media']: tweet.media.add(get_or_create_media(me)) # user_mentions", "\"\"\" tweet_data = json.loads(json_str) if tweet_data.get('lang'): lang = tweet_data.get('lang') if lang != \"en\":", "parsedate from msgvis.apps.questions.models import Article, Question from msgvis.apps.corpus.models import * from msgvis.apps.enhance.models import", "= user_data['profile_image_url'] sender.save() return sender def create_an_instance_from_json(json_str, dataset_obj): \"\"\" Given a dataset object,", "\"\"\" if 'in_reply_to_status_id' not in tweet_data: return None # if tweet_data.get('lang') != 'en':", "urls if entities.get('urls') and len(entities['urls']) > 0: tweet.contains_url = True for url in", "create_an_user_from_json_obj(tweet_data['user'], dataset_obj) # time_zone if tweet_data['user'].get('time_zone'): tweet.timezone = get_or_create_timezone(tweet_data['user']['time_zone']) # type if tweet_data.get('retweeted_status')", "get_or_create_messagetype(\"reply\") handle_reply_to(status_id=tweet_data['in_reply_to_status_id'], user_id=tweet_data['in_reply_to_user_id'], screen_name=tweet_data['in_reply_to_screen_name'], dataset_obj=dataset_obj) else: tweet.type = get_or_create_messagetype('tweet') if tweet_data.get('entities'): handle_entities(tweet, tweet_data.get('entities'),", "sys, six from django.db import IntegrityError from django.utils.timezone import utc from urlparse import", "sender tweet.sender = 
create_an_user_from_json_obj(tweet_data['user'], dataset_obj) # time_zone if tweet_data['user'].get('time_zone'): tweet.timezone = get_or_create_timezone(tweet_data['user']['time_zone']) #", "if original_tweet is not None: original_tweet.replied_to_count += 1 original_tweet.save() original_tweet.sender.replied_to_count += 1 original_tweet.sender.save()", "# created_at if tweet_data.get('created_at'): tweet.time = datetime(*(parsedate(tweet_data['created_at']))[:6], tzinfo=utc) # language if tweet_data.get('lang'): tweet.language", "zone, created = Timezone.objects.get_or_create(name=name) return zone def get_or_create_messagetype(name): mtype, created = MessageType.objects.get_or_create(name=name) return", "if tweet_data.get('created_at'): tweet.time = datetime(*(parsedate(tweet_data['created_at']))[:6], tzinfo=utc) # language if tweet_data.get('lang'): tweet.language = get_or_create_language(tweet_data['lang'])", "def create_an_user_from_json_obj(user_data, dataset_obj): sender, created = Person.objects.get_or_create(dataset=dataset_obj, original_id=user_data['id']) if user_data.get('screen_name'): sender.username = user_data['screen_name']", "screen_name, dataset_obj): # update original tweet shared_count tmp_tweet = { 'id': status_id, 'user':", "= get_or_create_messagetype(\"reply\") handle_reply_to(status_id=tweet_data['in_reply_to_status_id'], user_id=tweet_data['in_reply_to_user_id'], screen_name=tweet_data['in_reply_to_screen_name'], dataset_obj=dataset_obj) else: tweet.type = get_or_create_messagetype('tweet') if tweet_data.get('entities'): handle_entities(tweet,", "defaults={'authors': source['authors'], 'year': source['year'], 'venue': source['venue'], 'link': source['link']}) question = Question(source=article, text=q['text']) question.save()", "tweet.save() return tweet def load_research_questions_from_json(json_str): \"\"\" Load research questions from json string \"\"\"", "> 0: tweet.contains_hashtag = True for hashtag in entities['hashtags']: 
tweet.hashtags.add(get_or_create_hashtag(hashtag)) # urls if", "original_tweet.sender.save() def handle_entities(tweet, entities, dataset_obj): # hashtags if entities.get('hashtags') and len(entities['hashtags']) > 0:", "if tweet_data.get('text'): tweet.text = tweet_data['text'] # created_at if tweet_data.get('created_at'): tweet.time = datetime(*(parsedate(tweet_data['created_at']))[:6], tzinfo=utc)", "entities.get('urls') and len(entities['urls']) > 0: tweet.contains_url = True for url in entities['urls']: tweet.urls.add(get_or_create_url(url))", "# return None tweet, created = Message.objects.get_or_create(dataset=dataset_obj, original_id=tweet_data['id']) # text if tweet_data.get('text'): tweet.text", "user_id=tweet_data['in_reply_to_user_id'], screen_name=tweet_data['in_reply_to_screen_name'], dataset_obj=dataset_obj) else: tweet.type = get_or_create_messagetype('tweet') if tweet_data.get('entities'): handle_entities(tweet, tweet_data.get('entities'), dataset_obj) #", "if tweet_data.get('lang') != 'en': # return None tweet, created = Message.objects.get_or_create(dataset=dataset_obj, original_id=tweet_data['id']) #", "user_data.get('friends_count'): sender.friend_count = user_data['friends_count'] if user_data.get('followers_count'): sender.follower_count = user_data['followers_count'] if user_data.get('statuses_count'): sender.message_count =", "sender.full_name = user_data['name'] if user_data.get('lang'): sender.language = Language.objects.get_or_create(code=user_data['lang'])[0] if user_data.get('friends_count'): sender.friend_count = user_data['friends_count']", "user_data.get('name'): sender.full_name = user_data['name'] if user_data.get('lang'): sender.language = Language.objects.get_or_create(code=user_data['lang'])[0] if user_data.get('friends_count'): sender.friend_count =", "+= 1 original_tweet.sender.save() def handle_entities(tweet, entities, dataset_obj): # hashtags if entities.get('hashtags') and len(entities['hashtags'])", 
"<reponame>hds-lab/textvis-drg import sys, six from django.db import IntegrityError from django.utils.timezone import utc from", "entities, dataset_obj): # hashtags if entities.get('hashtags') and len(entities['hashtags']) > 0: tweet.contains_hashtag = True", "user_data.get('followers_count'): sender.follower_count = user_data['followers_count'] if user_data.get('statuses_count'): sender.message_count = user_data['statuses_count'] if user_data.get('profile_image_url'): sender.profile_image_url =", "get_or_create_media(mediablob): media, created = Media.objects.get_or_create(type=mediablob['type'], media_url=mediablob['media_url']) return media def handle_reply_to(status_id, user_id, screen_name, dataset_obj):", "entities['urls']: tweet.urls.add(get_or_create_url(url)) # media if entities.get('media') and len(entities['media']) > 0: tweet.contains_media = True", "in entities['hashtags']: tweet.hashtags.add(get_or_create_hashtag(hashtag)) # urls if entities.get('urls') and len(entities['urls']) > 0: tweet.contains_url =", "= get_or_create_messagetype(\"retweet\") handle_retweet(tweet_data['retweeted_status'], dataset_obj) elif tweet_data.get('in_reply_to_status_id') is not None: tweet.type = get_or_create_messagetype(\"reply\") handle_reply_to(status_id=tweet_data['in_reply_to_status_id'],", "dataset_obj): # update original tweet shared_count original_tweet = get_or_create_a_tweet_from_json_obj(retweeted_status, dataset_obj) if original_tweet is", "dataset_obj) elif tweet_data.get('in_reply_to_status_id') is not None: tweet.type = get_or_create_messagetype(\"reply\") handle_reply_to(status_id=tweet_data['in_reply_to_status_id'], user_id=tweet_data['in_reply_to_user_id'], screen_name=tweet_data['in_reply_to_screen_name'], dataset_obj=dataset_obj)", "tweet.contains_hashtag = True for hashtag in entities['hashtags']: tweet.hashtags.add(get_or_create_hashtag(hashtag)) # urls if entities.get('urls') and", "tweet.mentions.add(mention_obj) def 
get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj): \"\"\" Given a dataset object, imports a tweet from", "'screen_name': screen_name, }, 'in_reply_to_status_id': None } original_tweet = get_or_create_a_tweet_from_json_obj(tmp_tweet, dataset_obj) if original_tweet is", "return url def get_or_create_media(mediablob): media, created = Media.objects.get_or_create(type=mediablob['type'], media_url=mediablob['media_url']) return media def handle_reply_to(status_id,", "is not None: tweet.type = get_or_create_messagetype(\"retweet\") handle_retweet(tweet_data['retweeted_status'], dataset_obj) elif tweet_data.get('in_reply_to_status_id') is not None:", "ht def get_or_create_url(urlblob): urlparse_results = urlparse(urlblob['expanded_url']) domain = urlparse_results.netloc url, created = Url.objects.get_or_create(full_url=urlblob['expanded_url'],", "'year': source['year'], 'venue': source['venue'], 'link': source['link']}) question = Question(source=article, text=q['text']) question.save() for dim", "get_or_create_messagetype(\"retweet\") handle_retweet(tweet_data['retweeted_status'], dataset_obj) elif tweet_data.get('in_reply_to_status_id') is not None: tweet.type = get_or_create_messagetype(\"reply\") handle_reply_to(status_id=tweet_data['in_reply_to_status_id'], user_id=tweet_data['in_reply_to_user_id'],", "tweet.timezone = get_or_create_timezone(tweet_data['user']['time_zone']) # type if tweet_data.get('retweeted_status') is not None: tweet.type = get_or_create_messagetype(\"retweet\")", "created = Media.objects.get_or_create(type=mediablob['type'], media_url=mediablob['media_url']) return media def handle_reply_to(status_id, user_id, screen_name, dataset_obj): # update", "dataset. 
\"\"\" if 'in_reply_to_status_id' not in tweet_data: return None # if tweet_data.get('lang') !=", "from msgvis.apps.corpus.models import * from msgvis.apps.enhance.models import set_message_sentiment def create_an_user_from_json_obj(user_data, dataset_obj): sender, created", "not in tweet_data: return None # if tweet_data.get('lang') != 'en': # return None", "return None # if tweet_data.get('lang') != 'en': # return None tweet, created =", "import sys, six from django.db import IntegrityError from django.utils.timezone import utc from urlparse", "= Hashtag.objects.get_or_create(text=hashtagblob['text']) return ht def get_or_create_url(urlblob): urlparse_results = urlparse(urlblob['expanded_url']) domain = urlparse_results.netloc url,", "entities['media']: tweet.media.add(get_or_create_media(me)) # user_mentions if entities.get('user_mentions') and len(entities['user_mentions']) > 0: tweet.contains_mention = True", "tweet.text = tweet_data['text'] # created_at if tweet_data.get('created_at'): tweet.time = datetime(*(parsedate(tweet_data['created_at']))[:6], tzinfo=utc) # language", "dataset_obj) # sentiment set_message_sentiment(tweet, save=False) tweet.save() return tweet def load_research_questions_from_json(json_str): \"\"\" Load research", "= Language.objects.get_or_create(code=user_data['lang'])[0] if user_data.get('friends_count'): sender.friend_count = user_data['friends_count'] if user_data.get('followers_count'): sender.follower_count = user_data['followers_count'] if", "1 mention_obj.save() tweet.mentions.add(mention_obj) def get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj): \"\"\" Given a dataset object, imports a", "sender, created = Person.objects.get_or_create(dataset=dataset_obj, original_id=user_data['id']) if user_data.get('screen_name'): sender.username = user_data['screen_name'] if user_data.get('name'): sender.full_name", "type if tweet_data.get('retweeted_status') is not None: tweet.type = get_or_create_messagetype(\"retweet\") 
handle_retweet(tweet_data['retweeted_status'], dataset_obj) elif tweet_data.get('in_reply_to_status_id')", "+= 1 mention_obj.save() tweet.mentions.add(mention_obj) def get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj): \"\"\" Given a dataset object, imports", "tweet.type = get_or_create_messagetype(\"retweet\") handle_retweet(tweet_data['retweeted_status'], dataset_obj) elif tweet_data.get('in_reply_to_status_id') is not None: tweet.type = get_or_create_messagetype(\"reply\")", "load_research_questions_from_json(json_str): \"\"\" Load research questions from json string \"\"\" questions = json.loads(json_str) for", "!= \"en\": return False return get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj) def get_or_create_language(code): lang, created = Language.objects.get_or_create(code=code)", "dataset_obj): \"\"\" Given a dataset object, imports a tweet from json string into", "source['year'], 'venue': source['venue'], 'link': source['link']}) question = Question(source=article, text=q['text']) question.save() for dim in", "= True for url in entities['urls']: tweet.urls.add(get_or_create_url(url)) # media if entities.get('media') and len(entities['media'])", "dataset_obj) mention_obj.mentioned_count += 1 mention_obj.save() tweet.mentions.add(mention_obj) def get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj): \"\"\" Given a dataset", "def handle_reply_to(status_id, user_id, screen_name, dataset_obj): # update original tweet shared_count tmp_tweet = {", "if tweet_data['user'].get('time_zone'): tweet.timezone = get_or_create_timezone(tweet_data['user']['time_zone']) # type if tweet_data.get('retweeted_status') is not None: tweet.type", "\"\"\" questions = json.loads(json_str) for q in questions: source = q['source'] article, created", "tweet shared_count tmp_tweet = { 'id': status_id, 'user': { 'id': user_id, 'screen_name': screen_name,", "# if tweet_data.get('lang') != 'en': # return None tweet, created = 
Message.objects.get_or_create(dataset=dataset_obj, original_id=tweet_data['id'])", "research questions from json string \"\"\" questions = json.loads(json_str) for q in questions:", "return False return get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj) def get_or_create_language(code): lang, created = Language.objects.get_or_create(code=code) return lang", "mention_obj.mentioned_count += 1 mention_obj.save() tweet.mentions.add(mention_obj) def get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj): \"\"\" Given a dataset object,", "> 0: tweet.contains_url = True for url in entities['urls']: tweet.urls.add(get_or_create_url(url)) # media if", "tmp_tweet = { 'id': status_id, 'user': { 'id': user_id, 'screen_name': screen_name, }, 'in_reply_to_status_id':", "for url in entities['urls']: tweet.urls.add(get_or_create_url(url)) # media if entities.get('media') and len(entities['media']) > 0:", "tweet def load_research_questions_from_json(json_str): \"\"\" Load research questions from json string \"\"\" questions =", "import parsedate from msgvis.apps.questions.models import Article, Question from msgvis.apps.corpus.models import * from msgvis.apps.enhance.models", "original_tweet.sender.save() def handle_retweet(retweeted_status, dataset_obj): # update original tweet shared_count original_tweet = get_or_create_a_tweet_from_json_obj(retweeted_status, dataset_obj)", "get_or_create_url(urlblob): urlparse_results = urlparse(urlblob['expanded_url']) domain = urlparse_results.netloc url, created = Url.objects.get_or_create(full_url=urlblob['expanded_url'], domain=domain, short_url=urlblob['url'])", "original_tweet.replied_to_count += 1 original_tweet.save() original_tweet.sender.replied_to_count += 1 original_tweet.sender.save() def handle_retweet(retweeted_status, dataset_obj): # update", "tweet.urls.add(get_or_create_url(url)) # media if entities.get('media') and len(entities['media']) > 0: tweet.contains_media = True for", "from json string into the 
dataset. \"\"\" tweet_data = json.loads(json_str) if tweet_data.get('lang'): lang", "def get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj): \"\"\" Given a dataset object, imports a tweet from json", "datetime(*(parsedate(tweet_data['created_at']))[:6], tzinfo=utc) # language if tweet_data.get('lang'): tweet.language = get_or_create_language(tweet_data['lang']) if tweet_data.get('user'): # sender", "if entities.get('urls') and len(entities['urls']) > 0: tweet.contains_url = True for url in entities['urls']:", "# sentiment set_message_sentiment(tweet, save=False) tweet.save() return tweet def load_research_questions_from_json(json_str): \"\"\" Load research questions", "\"\"\" Load research questions from json string \"\"\" questions = json.loads(json_str) for q", "original_tweet is not None: original_tweet.replied_to_count += 1 original_tweet.save() original_tweet.sender.replied_to_count += 1 original_tweet.sender.save() def", "object, imports a tweet from json string into the dataset. 
\"\"\" tweet_data =", "> 0: tweet.contains_mention = True for mention in entities['user_mentions']: mention_obj = create_an_user_from_json_obj(mention, dataset_obj)", "'link': source['link']}) question = Question(source=article, text=q['text']) question.save() for dim in q['dimensions']: question.add_dimension(dim) question.save()", "mention in entities['user_mentions']: mention_obj = create_an_user_from_json_obj(mention, dataset_obj) mention_obj.mentioned_count += 1 mention_obj.save() tweet.mentions.add(mention_obj) def", "if tweet_data.get('retweeted_status') is not None: tweet.type = get_or_create_messagetype(\"retweet\") handle_retweet(tweet_data['retweeted_status'], dataset_obj) elif tweet_data.get('in_reply_to_status_id') is", "return zone def get_or_create_messagetype(name): mtype, created = MessageType.objects.get_or_create(name=name) return mtype def get_or_create_hashtag(hashtagblob): ht,", "created = MessageType.objects.get_or_create(name=name) return mtype def get_or_create_hashtag(hashtagblob): ht, created = Hashtag.objects.get_or_create(text=hashtagblob['text']) return ht", "msgvis.apps.corpus.models import * from msgvis.apps.enhance.models import set_message_sentiment def create_an_user_from_json_obj(user_data, dataset_obj): sender, created =", "if user_data.get('lang'): sender.language = Language.objects.get_or_create(code=user_data['lang'])[0] if user_data.get('friends_count'): sender.friend_count = user_data['friends_count'] if user_data.get('followers_count'): sender.follower_count", "get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj) def get_or_create_language(code): lang, created = Language.objects.get_or_create(code=code) return lang def get_or_create_timezone(name): zone,", "q in questions: source = q['source'] article, created = Article.objects.get_or_create(title=source['title'], defaults={'authors': source['authors'], 'year':", "dataset_obj): \"\"\" Given a dataset object, imports a tweet from json object into", "if 
tweet_data.get('user'): # sender tweet.sender = create_an_user_from_json_obj(tweet_data['user'], dataset_obj) # time_zone if tweet_data['user'].get('time_zone'): tweet.timezone", "into the dataset. \"\"\" if 'in_reply_to_status_id' not in tweet_data: return None # if", "= urlparse(urlblob['expanded_url']) domain = urlparse_results.netloc url, created = Url.objects.get_or_create(full_url=urlblob['expanded_url'], domain=domain, short_url=urlblob['url']) return url", "from msgvis.apps.questions.models import Article, Question from msgvis.apps.corpus.models import * from msgvis.apps.enhance.models import set_message_sentiment", "IntegrityError from django.utils.timezone import utc from urlparse import urlparse import json from datetime", "def get_or_create_url(urlblob): urlparse_results = urlparse(urlblob['expanded_url']) domain = urlparse_results.netloc url, created = Url.objects.get_or_create(full_url=urlblob['expanded_url'], domain=domain,", "media if entities.get('media') and len(entities['media']) > 0: tweet.contains_media = True for me in", "media def handle_reply_to(status_id, user_id, screen_name, dataset_obj): # update original tweet shared_count tmp_tweet =", "None: original_tweet.shared_count += 1 original_tweet.save() original_tweet.sender.shared_count += 1 original_tweet.sender.save() def handle_entities(tweet, entities, dataset_obj):", "tweet_data.get('text'): tweet.text = tweet_data['text'] # created_at if tweet_data.get('created_at'): tweet.time = datetime(*(parsedate(tweet_data['created_at']))[:6], tzinfo=utc) #", "# text if tweet_data.get('text'): tweet.text = tweet_data['text'] # created_at if tweet_data.get('created_at'): tweet.time =", "from msgvis.apps.enhance.models import set_message_sentiment def create_an_user_from_json_obj(user_data, dataset_obj): sender, created = Person.objects.get_or_create(dataset=dataset_obj, original_id=user_data['id']) if", "from django.utils.timezone import utc from urlparse import urlparse import json from datetime 
import", "if user_data.get('friends_count'): sender.friend_count = user_data['friends_count'] if user_data.get('followers_count'): sender.follower_count = user_data['followers_count'] if user_data.get('statuses_count'): sender.message_count", "lang def get_or_create_timezone(name): zone, created = Timezone.objects.get_or_create(name=name) return zone def get_or_create_messagetype(name): mtype, created", "Timezone.objects.get_or_create(name=name) return zone def get_or_create_messagetype(name): mtype, created = MessageType.objects.get_or_create(name=name) return mtype def get_or_create_hashtag(hashtagblob):", "MessageType.objects.get_or_create(name=name) return mtype def get_or_create_hashtag(hashtagblob): ht, created = Hashtag.objects.get_or_create(text=hashtagblob['text']) return ht def get_or_create_url(urlblob):", "set_message_sentiment(tweet, save=False) tweet.save() return tweet def load_research_questions_from_json(json_str): \"\"\" Load research questions from json", "a tweet from json object into the dataset. \"\"\" if 'in_reply_to_status_id' not in", "update original tweet shared_count original_tweet = get_or_create_a_tweet_from_json_obj(retweeted_status, dataset_obj) if original_tweet is not None:", "lang, created = Language.objects.get_or_create(code=code) return lang def get_or_create_timezone(name): zone, created = Timezone.objects.get_or_create(name=name) return", "None: tweet.type = get_or_create_messagetype(\"reply\") handle_reply_to(status_id=tweet_data['in_reply_to_status_id'], user_id=tweet_data['in_reply_to_user_id'], screen_name=tweet_data['in_reply_to_screen_name'], dataset_obj=dataset_obj) else: tweet.type = get_or_create_messagetype('tweet') if", "into the dataset. 
\"\"\" tweet_data = json.loads(json_str) if tweet_data.get('lang'): lang = tweet_data.get('lang') if", "language if tweet_data.get('lang'): tweet.language = get_or_create_language(tweet_data['lang']) if tweet_data.get('user'): # sender tweet.sender = create_an_user_from_json_obj(tweet_data['user'],", "def get_or_create_media(mediablob): media, created = Media.objects.get_or_create(type=mediablob['type'], media_url=mediablob['media_url']) return media def handle_reply_to(status_id, user_id, screen_name,", "imports a tweet from json object into the dataset. \"\"\" if 'in_reply_to_status_id' not", "dataset_obj): # hashtags if entities.get('hashtags') and len(entities['hashtags']) > 0: tweet.contains_hashtag = True for", "'id': status_id, 'user': { 'id': user_id, 'screen_name': screen_name, }, 'in_reply_to_status_id': None } original_tweet", "import json from datetime import datetime from email.utils import parsedate from msgvis.apps.questions.models import", "tweet from json object into the dataset. 
\"\"\" if 'in_reply_to_status_id' not in tweet_data:", "six from django.db import IntegrityError from django.utils.timezone import utc from urlparse import urlparse", "None: tweet.type = get_or_create_messagetype(\"retweet\") handle_retweet(tweet_data['retweeted_status'], dataset_obj) elif tweet_data.get('in_reply_to_status_id') is not None: tweet.type =", "= user_data['screen_name'] if user_data.get('name'): sender.full_name = user_data['name'] if user_data.get('lang'): sender.language = Language.objects.get_or_create(code=user_data['lang'])[0] if", "datetime import datetime from email.utils import parsedate from msgvis.apps.questions.models import Article, Question from", "tweet_data.get('user'): # sender tweet.sender = create_an_user_from_json_obj(tweet_data['user'], dataset_obj) # time_zone if tweet_data['user'].get('time_zone'): tweet.timezone =", "= Person.objects.get_or_create(dataset=dataset_obj, original_id=user_data['id']) if user_data.get('screen_name'): sender.username = user_data['screen_name'] if user_data.get('name'): sender.full_name = user_data['name']", "user_data['friends_count'] if user_data.get('followers_count'): sender.follower_count = user_data['followers_count'] if user_data.get('statuses_count'): sender.message_count = user_data['statuses_count'] if user_data.get('profile_image_url'):", "create_an_user_from_json_obj(mention, dataset_obj) mention_obj.mentioned_count += 1 mention_obj.save() tweet.mentions.add(mention_obj) def get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj): \"\"\" Given a", "= True for me in entities['media']: tweet.media.add(get_or_create_media(me)) # user_mentions if entities.get('user_mentions') and len(entities['user_mentions'])", "tweet.time = datetime(*(parsedate(tweet_data['created_at']))[:6], tzinfo=utc) # language if tweet_data.get('lang'): tweet.language = get_or_create_language(tweet_data['lang']) if tweet_data.get('user'):", "a dataset object, imports a tweet from json string into the dataset. 
\"\"\"", "return lang def get_or_create_timezone(name): zone, created = Timezone.objects.get_or_create(name=name) return zone def get_or_create_messagetype(name): mtype,", "from django.db import IntegrityError from django.utils.timezone import utc from urlparse import urlparse import", "not None: tweet.type = get_or_create_messagetype(\"reply\") handle_reply_to(status_id=tweet_data['in_reply_to_status_id'], user_id=tweet_data['in_reply_to_user_id'], screen_name=tweet_data['in_reply_to_screen_name'], dataset_obj=dataset_obj) else: tweet.type = get_or_create_messagetype('tweet')", "source['link']}) question = Question(source=article, text=q['text']) question.save() for dim in q['dimensions']: question.add_dimension(dim) question.save() return", "not None: original_tweet.shared_count += 1 original_tweet.save() original_tweet.sender.shared_count += 1 original_tweet.sender.save() def handle_entities(tweet, entities,", "in questions: source = q['source'] article, created = Article.objects.get_or_create(title=source['title'], defaults={'authors': source['authors'], 'year': source['year'],", "user_data['followers_count'] if user_data.get('statuses_count'): sender.message_count = user_data['statuses_count'] if user_data.get('profile_image_url'): sender.profile_image_url = user_data['profile_image_url'] sender.save() return", "= user_data['statuses_count'] if user_data.get('profile_image_url'): sender.profile_image_url = user_data['profile_image_url'] sender.save() return sender def create_an_instance_from_json(json_str, dataset_obj):", "None tweet, created = Message.objects.get_or_create(dataset=dataset_obj, original_id=tweet_data['id']) # text if tweet_data.get('text'): tweet.text = tweet_data['text']", "in entities['urls']: tweet.urls.add(get_or_create_url(url)) # media if entities.get('media') and len(entities['media']) > 0: tweet.contains_media =", "= Language.objects.get_or_create(code=code) return lang def get_or_create_timezone(name): zone, created = 
Timezone.objects.get_or_create(name=name) return zone def", "import datetime from email.utils import parsedate from msgvis.apps.questions.models import Article, Question from msgvis.apps.corpus.models", "source['authors'], 'year': source['year'], 'venue': source['venue'], 'link': source['link']}) question = Question(source=article, text=q['text']) question.save() for", "1 original_tweet.save() original_tweet.sender.replied_to_count += 1 original_tweet.sender.save() def handle_retweet(retweeted_status, dataset_obj): # update original tweet", "entities['hashtags']: tweet.hashtags.add(get_or_create_hashtag(hashtag)) # urls if entities.get('urls') and len(entities['urls']) > 0: tweet.contains_url = True", "get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj): \"\"\" Given a dataset object, imports a tweet from json object", "get_or_create_a_tweet_from_json_obj(tmp_tweet, dataset_obj) if original_tweet is not None: original_tweet.replied_to_count += 1 original_tweet.save() original_tweet.sender.replied_to_count +=", "user_data['screen_name'] if user_data.get('name'): sender.full_name = user_data['name'] if user_data.get('lang'): sender.language = Language.objects.get_or_create(code=user_data['lang'])[0] if user_data.get('friends_count'):", "+= 1 original_tweet.sender.save() def handle_retweet(retweeted_status, dataset_obj): # update original tweet shared_count original_tweet =", "'in_reply_to_status_id' not in tweet_data: return None # if tweet_data.get('lang') != 'en': # return", "}, 'in_reply_to_status_id': None } original_tweet = get_or_create_a_tweet_from_json_obj(tmp_tweet, dataset_obj) if original_tweet is not None:", "msgvis.apps.questions.models import Article, Question from msgvis.apps.corpus.models import * from msgvis.apps.enhance.models import set_message_sentiment def", "object into the dataset. 
\"\"\" if 'in_reply_to_status_id' not in tweet_data: return None #", "created = Timezone.objects.get_or_create(name=name) return zone def get_or_create_messagetype(name): mtype, created = MessageType.objects.get_or_create(name=name) return mtype", "0: tweet.contains_mention = True for mention in entities['user_mentions']: mention_obj = create_an_user_from_json_obj(mention, dataset_obj) mention_obj.mentioned_count", "url in entities['urls']: tweet.urls.add(get_or_create_url(url)) # media if entities.get('media') and len(entities['media']) > 0: tweet.contains_media", "dataset object, imports a tweet from json object into the dataset. \"\"\" if", "Article, Question from msgvis.apps.corpus.models import * from msgvis.apps.enhance.models import set_message_sentiment def create_an_user_from_json_obj(user_data, dataset_obj):", "if user_data.get('screen_name'): sender.username = user_data['screen_name'] if user_data.get('name'): sender.full_name = user_data['name'] if user_data.get('lang'): sender.language", "= tweet_data.get('lang') if lang != \"en\": return False return get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj) def get_or_create_language(code):", "tweet_data.get('lang') != 'en': # return None tweet, created = Message.objects.get_or_create(dataset=dataset_obj, original_id=tweet_data['id']) # text", "for me in entities['media']: tweet.media.add(get_or_create_media(me)) # user_mentions if entities.get('user_mentions') and len(entities['user_mentions']) > 0:", "def create_an_instance_from_json(json_str, dataset_obj): \"\"\" Given a dataset object, imports a tweet from json", "def handle_entities(tweet, entities, dataset_obj): # hashtags if entities.get('hashtags') and len(entities['hashtags']) > 0: tweet.contains_hashtag", "media, created = Media.objects.get_or_create(type=mediablob['type'], media_url=mediablob['media_url']) return media def handle_reply_to(status_id, user_id, screen_name, dataset_obj): #", "django.db import IntegrityError from 
django.utils.timezone import utc from urlparse import urlparse import json", "sender def create_an_instance_from_json(json_str, dataset_obj): \"\"\" Given a dataset object, imports a tweet from", "handle_entities(tweet, entities, dataset_obj): # hashtags if entities.get('hashtags') and len(entities['hashtags']) > 0: tweet.contains_hashtag =", "= MessageType.objects.get_or_create(name=name) return mtype def get_or_create_hashtag(hashtagblob): ht, created = Hashtag.objects.get_or_create(text=hashtagblob['text']) return ht def", "from urlparse import urlparse import json from datetime import datetime from email.utils import", "= True for mention in entities['user_mentions']: mention_obj = create_an_user_from_json_obj(mention, dataset_obj) mention_obj.mentioned_count += 1", "import utc from urlparse import urlparse import json from datetime import datetime from", "= { 'id': status_id, 'user': { 'id': user_id, 'screen_name': screen_name, }, 'in_reply_to_status_id': None", "tweet_data.get('entities'), dataset_obj) # sentiment set_message_sentiment(tweet, save=False) tweet.save() return tweet def load_research_questions_from_json(json_str): \"\"\" Load", "urlparse import urlparse import json from datetime import datetime from email.utils import parsedate", "if tweet_data.get('lang'): lang = tweet_data.get('lang') if lang != \"en\": return False return get_or_create_a_tweet_from_json_obj(tweet_data,", "original_id=tweet_data['id']) # text if tweet_data.get('text'): tweet.text = tweet_data['text'] # created_at if tweet_data.get('created_at'): tweet.time", "len(entities['urls']) > 0: tweet.contains_url = True for url in entities['urls']: tweet.urls.add(get_or_create_url(url)) # media", "dataset_obj) if original_tweet is not None: original_tweet.replied_to_count += 1 original_tweet.save() original_tweet.sender.replied_to_count += 1", "def get_or_create_timezone(name): zone, created = Timezone.objects.get_or_create(name=name) return zone def get_or_create_messagetype(name): 
mtype, created =", "if user_data.get('followers_count'): sender.follower_count = user_data['followers_count'] if user_data.get('statuses_count'): sender.message_count = user_data['statuses_count'] if user_data.get('profile_image_url'): sender.profile_image_url", "Article.objects.get_or_create(title=source['title'], defaults={'authors': source['authors'], 'year': source['year'], 'venue': source['venue'], 'link': source['link']}) question = Question(source=article, text=q['text'])", "'id': user_id, 'screen_name': screen_name, }, 'in_reply_to_status_id': None } original_tweet = get_or_create_a_tweet_from_json_obj(tmp_tweet, dataset_obj) if", "= get_or_create_timezone(tweet_data['user']['time_zone']) # type if tweet_data.get('retweeted_status') is not None: tweet.type = get_or_create_messagetype(\"retweet\") handle_retweet(tweet_data['retweeted_status'],", "# update original tweet shared_count original_tweet = get_or_create_a_tweet_from_json_obj(retweeted_status, dataset_obj) if original_tweet is not", "get_or_create_language(tweet_data['lang']) if tweet_data.get('user'): # sender tweet.sender = create_an_user_from_json_obj(tweet_data['user'], dataset_obj) # time_zone if tweet_data['user'].get('time_zone'):", "None } original_tweet = get_or_create_a_tweet_from_json_obj(tmp_tweet, dataset_obj) if original_tweet is not None: original_tweet.replied_to_count +=", "tweet.type = get_or_create_messagetype('tweet') if tweet_data.get('entities'): handle_entities(tweet, tweet_data.get('entities'), dataset_obj) # sentiment set_message_sentiment(tweet, save=False) tweet.save()", "handle_reply_to(status_id, user_id, screen_name, dataset_obj): # update original tweet shared_count tmp_tweet = { 'id':", "'in_reply_to_status_id': None } original_tweet = get_or_create_a_tweet_from_json_obj(tmp_tweet, dataset_obj) if original_tweet is not None: original_tweet.replied_to_count", "= urlparse_results.netloc url, created = Url.objects.get_or_create(full_url=urlblob['expanded_url'], 
domain=domain, short_url=urlblob['url']) return url def get_or_create_media(mediablob): media,", "url, created = Url.objects.get_or_create(full_url=urlblob['expanded_url'], domain=domain, short_url=urlblob['url']) return url def get_or_create_media(mediablob): media, created =", "def get_or_create_messagetype(name): mtype, created = MessageType.objects.get_or_create(name=name) return mtype def get_or_create_hashtag(hashtagblob): ht, created =", "created = Article.objects.get_or_create(title=source['title'], defaults={'authors': source['authors'], 'year': source['year'], 'venue': source['venue'], 'link': source['link']}) question =", "if tweet_data.get('entities'): handle_entities(tweet, tweet_data.get('entities'), dataset_obj) # sentiment set_message_sentiment(tweet, save=False) tweet.save() return tweet def", "> 0: tweet.contains_media = True for me in entities['media']: tweet.media.add(get_or_create_media(me)) # user_mentions if", "0: tweet.contains_hashtag = True for hashtag in entities['hashtags']: tweet.hashtags.add(get_or_create_hashtag(hashtag)) # urls if entities.get('urls')", "tweet_data.get('lang'): tweet.language = get_or_create_language(tweet_data['lang']) if tweet_data.get('user'): # sender tweet.sender = create_an_user_from_json_obj(tweet_data['user'], dataset_obj) #", "0: tweet.contains_media = True for me in entities['media']: tweet.media.add(get_or_create_media(me)) # user_mentions if entities.get('user_mentions')", "shared_count tmp_tweet = { 'id': status_id, 'user': { 'id': user_id, 'screen_name': screen_name, },", "user_id, 'screen_name': screen_name, }, 'in_reply_to_status_id': None } original_tweet = get_or_create_a_tweet_from_json_obj(tmp_tweet, dataset_obj) if original_tweet", "urlparse import json from datetime import datetime from email.utils import parsedate from msgvis.apps.questions.models", "if 'in_reply_to_status_id' not in tweet_data: return None # if tweet_data.get('lang') != 'en': #", "original_tweet = 
get_or_create_a_tweet_from_json_obj(retweeted_status, dataset_obj) if original_tweet is not None: original_tweet.shared_count += 1 original_tweet.save()", "= user_data['friends_count'] if user_data.get('followers_count'): sender.follower_count = user_data['followers_count'] if user_data.get('statuses_count'): sender.message_count = user_data['statuses_count'] if", "Language.objects.get_or_create(code=code) return lang def get_or_create_timezone(name): zone, created = Timezone.objects.get_or_create(name=name) return zone def get_or_create_messagetype(name):", "return mtype def get_or_create_hashtag(hashtagblob): ht, created = Hashtag.objects.get_or_create(text=hashtagblob['text']) return ht def get_or_create_url(urlblob): urlparse_results", "# hashtags if entities.get('hashtags') and len(entities['hashtags']) > 0: tweet.contains_hashtag = True for hashtag", "source['venue'], 'link': source['link']}) question = Question(source=article, text=q['text']) question.save() for dim in q['dimensions']: question.add_dimension(dim)", "question = Question(source=article, text=q['text']) question.save() for dim in q['dimensions']: question.add_dimension(dim) question.save() return True", "len(entities['hashtags']) > 0: tweet.contains_hashtag = True for hashtag in entities['hashtags']: tweet.hashtags.add(get_or_create_hashtag(hashtag)) # urls", "hashtags if entities.get('hashtags') and len(entities['hashtags']) > 0: tweet.contains_hashtag = True for hashtag in", "'en': # return None tweet, created = Message.objects.get_or_create(dataset=dataset_obj, original_id=tweet_data['id']) # text if tweet_data.get('text'):", "and len(entities['media']) > 0: tweet.contains_media = True for me in entities['media']: tweet.media.add(get_or_create_media(me)) #", "zone def get_or_create_messagetype(name): mtype, created = MessageType.objects.get_or_create(name=name) return mtype def get_or_create_hashtag(hashtagblob): ht, created", "None: original_tweet.replied_to_count += 1 original_tweet.save() 
original_tweet.sender.replied_to_count += 1 original_tweet.sender.save() def handle_retweet(retweeted_status, dataset_obj): #", "len(entities['user_mentions']) > 0: tweet.contains_mention = True for mention in entities['user_mentions']: mention_obj = create_an_user_from_json_obj(mention,", "Language.objects.get_or_create(code=user_data['lang'])[0] if user_data.get('friends_count'): sender.friend_count = user_data['friends_count'] if user_data.get('followers_count'): sender.follower_count = user_data['followers_count'] if user_data.get('statuses_count'):", "string \"\"\" questions = json.loads(json_str) for q in questions: source = q['source'] article,", "lang != \"en\": return False return get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj) def get_or_create_language(code): lang, created =", "shared_count original_tweet = get_or_create_a_tweet_from_json_obj(retweeted_status, dataset_obj) if original_tweet is not None: original_tweet.shared_count += 1", "dataset_obj=dataset_obj) else: tweet.type = get_or_create_messagetype('tweet') if tweet_data.get('entities'): handle_entities(tweet, tweet_data.get('entities'), dataset_obj) # sentiment set_message_sentiment(tweet,", "def handle_retweet(retweeted_status, dataset_obj): # update original tweet shared_count original_tweet = get_or_create_a_tweet_from_json_obj(retweeted_status, dataset_obj) if", "handle_reply_to(status_id=tweet_data['in_reply_to_status_id'], user_id=tweet_data['in_reply_to_user_id'], screen_name=tweet_data['in_reply_to_screen_name'], dataset_obj=dataset_obj) else: tweet.type = get_or_create_messagetype('tweet') if tweet_data.get('entities'): handle_entities(tweet, tweet_data.get('entities'), dataset_obj)", "created_at if tweet_data.get('created_at'): tweet.time = datetime(*(parsedate(tweet_data['created_at']))[:6], tzinfo=utc) # language if tweet_data.get('lang'): tweet.language =", "def get_or_create_language(code): lang, created = Language.objects.get_or_create(code=code) return lang def 
get_or_create_timezone(name): zone, created =", "for hashtag in entities['hashtags']: tweet.hashtags.add(get_or_create_hashtag(hashtag)) # urls if entities.get('urls') and len(entities['urls']) > 0:", "not None: tweet.type = get_or_create_messagetype(\"retweet\") handle_retweet(tweet_data['retweeted_status'], dataset_obj) elif tweet_data.get('in_reply_to_status_id') is not None: tweet.type", "* from msgvis.apps.enhance.models import set_message_sentiment def create_an_user_from_json_obj(user_data, dataset_obj): sender, created = Person.objects.get_or_create(dataset=dataset_obj, original_id=user_data['id'])", "from email.utils import parsedate from msgvis.apps.questions.models import Article, Question from msgvis.apps.corpus.models import *", "tweet.media.add(get_or_create_media(me)) # user_mentions if entities.get('user_mentions') and len(entities['user_mentions']) > 0: tweet.contains_mention = True for", "json string \"\"\" questions = json.loads(json_str) for q in questions: source = q['source']", "is not None: original_tweet.shared_count += 1 original_tweet.save() original_tweet.sender.shared_count += 1 original_tweet.sender.save() def handle_entities(tweet,", "tweet_data.get('entities'): handle_entities(tweet, tweet_data.get('entities'), dataset_obj) # sentiment set_message_sentiment(tweet, save=False) tweet.save() return tweet def load_research_questions_from_json(json_str):", "tweet, created = Message.objects.get_or_create(dataset=dataset_obj, original_id=tweet_data['id']) # text if tweet_data.get('text'): tweet.text = tweet_data['text'] #", "user_data['statuses_count'] if user_data.get('profile_image_url'): sender.profile_image_url = user_data['profile_image_url'] sender.save() return sender def create_an_instance_from_json(json_str, dataset_obj): \"\"\"", "mention_obj = create_an_user_from_json_obj(mention, dataset_obj) mention_obj.mentioned_count += 1 mention_obj.save() tweet.mentions.add(mention_obj) def get_or_create_a_tweet_from_json_obj(tweet_data, 
dataset_obj): \"\"\"", "original tweet shared_count tmp_tweet = { 'id': status_id, 'user': { 'id': user_id, 'screen_name':", "dataset. \"\"\" tweet_data = json.loads(json_str) if tweet_data.get('lang'): lang = tweet_data.get('lang') if lang !=", "entities['user_mentions']: mention_obj = create_an_user_from_json_obj(mention, dataset_obj) mention_obj.mentioned_count += 1 mention_obj.save() tweet.mentions.add(mention_obj) def get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj):", "from json object into the dataset. \"\"\" if 'in_reply_to_status_id' not in tweet_data: return", "from datetime import datetime from email.utils import parsedate from msgvis.apps.questions.models import Article, Question", "is not None: original_tweet.replied_to_count += 1 original_tweet.save() original_tweet.sender.replied_to_count += 1 original_tweet.sender.save() def handle_retweet(retweeted_status,", "text if tweet_data.get('text'): tweet.text = tweet_data['text'] # created_at if tweet_data.get('created_at'): tweet.time = datetime(*(parsedate(tweet_data['created_at']))[:6],", "urlparse(urlblob['expanded_url']) domain = urlparse_results.netloc url, created = Url.objects.get_or_create(full_url=urlblob['expanded_url'], domain=domain, short_url=urlblob['url']) return url def", "= user_data['followers_count'] if user_data.get('statuses_count'): sender.message_count = user_data['statuses_count'] if user_data.get('profile_image_url'): sender.profile_image_url = user_data['profile_image_url'] sender.save()", "object, imports a tweet from json object into the dataset. 
\"\"\" if 'in_reply_to_status_id'", "entities.get('media') and len(entities['media']) > 0: tweet.contains_media = True for me in entities['media']: tweet.media.add(get_or_create_media(me))", "urlparse_results = urlparse(urlblob['expanded_url']) domain = urlparse_results.netloc url, created = Url.objects.get_or_create(full_url=urlblob['expanded_url'], domain=domain, short_url=urlblob['url']) return", "= True for hashtag in entities['hashtags']: tweet.hashtags.add(get_or_create_hashtag(hashtag)) # urls if entities.get('urls') and len(entities['urls'])", "entities.get('user_mentions') and len(entities['user_mentions']) > 0: tweet.contains_mention = True for mention in entities['user_mentions']: mention_obj", "Load research questions from json string \"\"\" questions = json.loads(json_str) for q in", "if user_data.get('name'): sender.full_name = user_data['name'] if user_data.get('lang'): sender.language = Language.objects.get_or_create(code=user_data['lang'])[0] if user_data.get('friends_count'): sender.friend_count", "from json string \"\"\" questions = json.loads(json_str) for q in questions: source =", "json.loads(json_str) for q in questions: source = q['source'] article, created = Article.objects.get_or_create(title=source['title'], defaults={'authors':", "!= 'en': # return None tweet, created = Message.objects.get_or_create(dataset=dataset_obj, original_id=tweet_data['id']) # text if", "email.utils import parsedate from msgvis.apps.questions.models import Article, Question from msgvis.apps.corpus.models import * from", "{ 'id': status_id, 'user': { 'id': user_id, 'screen_name': screen_name, }, 'in_reply_to_status_id': None }", "original tweet shared_count original_tweet = get_or_create_a_tweet_from_json_obj(retweeted_status, dataset_obj) if original_tweet is not None: original_tweet.shared_count", "# time_zone if tweet_data['user'].get('time_zone'): tweet.timezone = get_or_create_timezone(tweet_data['user']['time_zone']) # type if 
tweet_data.get('retweeted_status') is not", "{ 'id': user_id, 'screen_name': screen_name, }, 'in_reply_to_status_id': None } original_tweet = get_or_create_a_tweet_from_json_obj(tmp_tweet, dataset_obj)", "entities.get('hashtags') and len(entities['hashtags']) > 0: tweet.contains_hashtag = True for hashtag in entities['hashtags']: tweet.hashtags.add(get_or_create_hashtag(hashtag))", "if entities.get('user_mentions') and len(entities['user_mentions']) > 0: tweet.contains_mention = True for mention in entities['user_mentions']:", "Message.objects.get_or_create(dataset=dataset_obj, original_id=tweet_data['id']) # text if tweet_data.get('text'): tweet.text = tweet_data['text'] # created_at if tweet_data.get('created_at'):", "and len(entities['urls']) > 0: tweet.contains_url = True for url in entities['urls']: tweet.urls.add(get_or_create_url(url)) #", "time_zone if tweet_data['user'].get('time_zone'): tweet.timezone = get_or_create_timezone(tweet_data['user']['time_zone']) # type if tweet_data.get('retweeted_status') is not None:", "questions: source = q['source'] article, created = Article.objects.get_or_create(title=source['title'], defaults={'authors': source['authors'], 'year': source['year'], 'venue':", "dataset_obj) if original_tweet is not None: original_tweet.shared_count += 1 original_tweet.save() original_tweet.sender.shared_count += 1", "original_id=user_data['id']) if user_data.get('screen_name'): sender.username = user_data['screen_name'] if user_data.get('name'): sender.full_name = user_data['name'] if user_data.get('lang'):", "save=False) tweet.save() return tweet def load_research_questions_from_json(json_str): \"\"\" Load research questions from json string", "if user_data.get('profile_image_url'): sender.profile_image_url = user_data['profile_image_url'] sender.save() return sender def create_an_instance_from_json(json_str, dataset_obj): \"\"\" Given", "a dataset object, imports a tweet from json object into the dataset. 
\"\"\"", "return sender def create_an_instance_from_json(json_str, dataset_obj): \"\"\" Given a dataset object, imports a tweet", "1 original_tweet.sender.save() def handle_entities(tweet, entities, dataset_obj): # hashtags if entities.get('hashtags') and len(entities['hashtags']) >", "= get_or_create_a_tweet_from_json_obj(retweeted_status, dataset_obj) if original_tweet is not None: original_tweet.shared_count += 1 original_tweet.save() original_tweet.sender.shared_count", "json object into the dataset. \"\"\" if 'in_reply_to_status_id' not in tweet_data: return None", "dataset_obj) def get_or_create_language(code): lang, created = Language.objects.get_or_create(code=code) return lang def get_or_create_timezone(name): zone, created", "screen_name, }, 'in_reply_to_status_id': None } original_tweet = get_or_create_a_tweet_from_json_obj(tmp_tweet, dataset_obj) if original_tweet is not", "Media.objects.get_or_create(type=mediablob['type'], media_url=mediablob['media_url']) return media def handle_reply_to(status_id, user_id, screen_name, dataset_obj): # update original tweet", "source = q['source'] article, created = Article.objects.get_or_create(title=source['title'], defaults={'authors': source['authors'], 'year': source['year'], 'venue': source['venue'],", "None # if tweet_data.get('lang') != 'en': # return None tweet, created = Message.objects.get_or_create(dataset=dataset_obj,", "dataset_obj): sender, created = Person.objects.get_or_create(dataset=dataset_obj, original_id=user_data['id']) if user_data.get('screen_name'): sender.username = user_data['screen_name'] if user_data.get('name'):", "+= 1 original_tweet.save() original_tweet.sender.replied_to_count += 1 original_tweet.sender.save() def handle_retweet(retweeted_status, dataset_obj): # update original", "# user_mentions if entities.get('user_mentions') and len(entities['user_mentions']) > 0: tweet.contains_mention = True for mention", "tweet_data: return None # if tweet_data.get('lang') != 'en': # return 
None tweet, created", "article, created = Article.objects.get_or_create(title=source['title'], defaults={'authors': source['authors'], 'year': source['year'], 'venue': source['venue'], 'link': source['link']}) question", "status_id, 'user': { 'id': user_id, 'screen_name': screen_name, }, 'in_reply_to_status_id': None } original_tweet =", "questions from json string \"\"\" questions = json.loads(json_str) for q in questions: source", "json.loads(json_str) if tweet_data.get('lang'): lang = tweet_data.get('lang') if lang != \"en\": return False return", "tweet_data['user'].get('time_zone'): tweet.timezone = get_or_create_timezone(tweet_data['user']['time_zone']) # type if tweet_data.get('retweeted_status') is not None: tweet.type =", "Question from msgvis.apps.corpus.models import * from msgvis.apps.enhance.models import set_message_sentiment def create_an_user_from_json_obj(user_data, dataset_obj): sender,", "= Article.objects.get_or_create(title=source['title'], defaults={'authors': source['authors'], 'year': source['year'], 'venue': source['venue'], 'link': source['link']}) question = Question(source=article,", "original_tweet.save() original_tweet.sender.shared_count += 1 original_tweet.sender.save() def handle_entities(tweet, entities, dataset_obj): # hashtags if entities.get('hashtags')", "import * from msgvis.apps.enhance.models import set_message_sentiment def create_an_user_from_json_obj(user_data, dataset_obj): sender, created = Person.objects.get_or_create(dataset=dataset_obj,", "# urls if entities.get('urls') and len(entities['urls']) > 0: tweet.contains_url = True for url", "False return get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj) def get_or_create_language(code): lang, created = Language.objects.get_or_create(code=code) return lang def", "created = Hashtag.objects.get_or_create(text=hashtagblob['text']) return ht def get_or_create_url(urlblob): urlparse_results = urlparse(urlblob['expanded_url']) domain = urlparse_results.netloc", 
"get_or_create_timezone(tweet_data['user']['time_zone']) # type if tweet_data.get('retweeted_status') is not None: tweet.type = get_or_create_messagetype(\"retweet\") handle_retweet(tweet_data['retweeted_status'], dataset_obj)", "created = Message.objects.get_or_create(dataset=dataset_obj, original_id=tweet_data['id']) # text if tweet_data.get('text'): tweet.text = tweet_data['text'] # created_at", "tzinfo=utc) # language if tweet_data.get('lang'): tweet.language = get_or_create_language(tweet_data['lang']) if tweet_data.get('user'): # sender tweet.sender", "in entities['user_mentions']: mention_obj = create_an_user_from_json_obj(mention, dataset_obj) mention_obj.mentioned_count += 1 mention_obj.save() tweet.mentions.add(mention_obj) def get_or_create_a_tweet_from_json_obj(tweet_data,", "a tweet from json string into the dataset. \"\"\" tweet_data = json.loads(json_str) if", "# type if tweet_data.get('retweeted_status') is not None: tweet.type = get_or_create_messagetype(\"retweet\") handle_retweet(tweet_data['retweeted_status'], dataset_obj) elif", "sender.profile_image_url = user_data['profile_image_url'] sender.save() return sender def create_an_instance_from_json(json_str, dataset_obj): \"\"\" Given a dataset", "tweet_data.get('in_reply_to_status_id') is not None: tweet.type = get_or_create_messagetype(\"reply\") handle_reply_to(status_id=tweet_data['in_reply_to_status_id'], user_id=tweet_data['in_reply_to_user_id'], screen_name=tweet_data['in_reply_to_screen_name'], dataset_obj=dataset_obj) else: tweet.type", "sender.language = Language.objects.get_or_create(code=user_data['lang'])[0] if user_data.get('friends_count'): sender.friend_count = user_data['friends_count'] if user_data.get('followers_count'): sender.follower_count = user_data['followers_count']", "hashtag in entities['hashtags']: tweet.hashtags.add(get_or_create_hashtag(hashtag)) # urls if entities.get('urls') and len(entities['urls']) > 0: tweet.contains_url", "created = 
Person.objects.get_or_create(dataset=dataset_obj, original_id=user_data['id']) if user_data.get('screen_name'): sender.username = user_data['screen_name'] if user_data.get('name'): sender.full_name =", "tweet_data.get('lang') if lang != \"en\": return False return get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj) def get_or_create_language(code): lang,", "tweet.hashtags.add(get_or_create_hashtag(hashtag)) # urls if entities.get('urls') and len(entities['urls']) > 0: tweet.contains_url = True for", "= datetime(*(parsedate(tweet_data['created_at']))[:6], tzinfo=utc) # language if tweet_data.get('lang'): tweet.language = get_or_create_language(tweet_data['lang']) if tweet_data.get('user'): #", "Person.objects.get_or_create(dataset=dataset_obj, original_id=user_data['id']) if user_data.get('screen_name'): sender.username = user_data['screen_name'] if user_data.get('name'): sender.full_name = user_data['name'] if", "True for me in entities['media']: tweet.media.add(get_or_create_media(me)) # user_mentions if entities.get('user_mentions') and len(entities['user_mentions']) >", "dataset_obj): # update original tweet shared_count tmp_tweet = { 'id': status_id, 'user': {", "handle_retweet(retweeted_status, dataset_obj): # update original tweet shared_count original_tweet = get_or_create_a_tweet_from_json_obj(retweeted_status, dataset_obj) if original_tweet", "tweet_data = json.loads(json_str) if tweet_data.get('lang'): lang = tweet_data.get('lang') if lang != \"en\": return", "= user_data['name'] if user_data.get('lang'): sender.language = Language.objects.get_or_create(code=user_data['lang'])[0] if user_data.get('friends_count'): sender.friend_count = user_data['friends_count'] if", "original_tweet = get_or_create_a_tweet_from_json_obj(tmp_tweet, dataset_obj) if original_tweet is not None: original_tweet.replied_to_count += 1 original_tweet.save()", "\"\"\" Given a dataset object, imports a tweet from json object into the", "\"en\": return False return 
get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj) def get_or_create_language(code): lang, created = Language.objects.get_or_create(code=code) return", "sender.follower_count = user_data['followers_count'] if user_data.get('statuses_count'): sender.message_count = user_data['statuses_count'] if user_data.get('profile_image_url'): sender.profile_image_url = user_data['profile_image_url']", "datetime from email.utils import parsedate from msgvis.apps.questions.models import Article, Question from msgvis.apps.corpus.models import", "return get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj) def get_or_create_language(code): lang, created = Language.objects.get_or_create(code=code) return lang def get_or_create_timezone(name):", "tweet_data['text'] # created_at if tweet_data.get('created_at'): tweet.time = datetime(*(parsedate(tweet_data['created_at']))[:6], tzinfo=utc) # language if tweet_data.get('lang'):", "= Message.objects.get_or_create(dataset=dataset_obj, original_id=tweet_data['id']) # text if tweet_data.get('text'): tweet.text = tweet_data['text'] # created_at if", "# sender tweet.sender = create_an_user_from_json_obj(tweet_data['user'], dataset_obj) # time_zone if tweet_data['user'].get('time_zone'): tweet.timezone = get_or_create_timezone(tweet_data['user']['time_zone'])", "and len(entities['user_mentions']) > 0: tweet.contains_mention = True for mention in entities['user_mentions']: mention_obj =", "created = Url.objects.get_or_create(full_url=urlblob['expanded_url'], domain=domain, short_url=urlblob['url']) return url def get_or_create_media(mediablob): media, created = Media.objects.get_or_create(type=mediablob['type'],", "= create_an_user_from_json_obj(mention, dataset_obj) mention_obj.mentioned_count += 1 mention_obj.save() tweet.mentions.add(mention_obj) def get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj): \"\"\" Given", "questions = json.loads(json_str) for q in questions: source = q['source'] article, created =", 
"get_or_create_messagetype('tweet') if tweet_data.get('entities'): handle_entities(tweet, tweet_data.get('entities'), dataset_obj) # sentiment set_message_sentiment(tweet, save=False) tweet.save() return tweet", "return ht def get_or_create_url(urlblob): urlparse_results = urlparse(urlblob['expanded_url']) domain = urlparse_results.netloc url, created =", "if user_data.get('statuses_count'): sender.message_count = user_data['statuses_count'] if user_data.get('profile_image_url'): sender.profile_image_url = user_data['profile_image_url'] sender.save() return sender", "Url.objects.get_or_create(full_url=urlblob['expanded_url'], domain=domain, short_url=urlblob['url']) return url def get_or_create_media(mediablob): media, created = Media.objects.get_or_create(type=mediablob['type'], media_url=mediablob['media_url']) return", "sender.message_count = user_data['statuses_count'] if user_data.get('profile_image_url'): sender.profile_image_url = user_data['profile_image_url'] sender.save() return sender def create_an_instance_from_json(json_str,", "Hashtag.objects.get_or_create(text=hashtagblob['text']) return ht def get_or_create_url(urlblob): urlparse_results = urlparse(urlblob['expanded_url']) domain = urlparse_results.netloc url, created", "user_data.get('lang'): sender.language = Language.objects.get_or_create(code=user_data['lang'])[0] if user_data.get('friends_count'): sender.friend_count = user_data['friends_count'] if user_data.get('followers_count'): sender.follower_count =", "original_tweet.sender.shared_count += 1 original_tweet.sender.save() def handle_entities(tweet, entities, dataset_obj): # hashtags if entities.get('hashtags') and", "# update original tweet shared_count tmp_tweet = { 'id': status_id, 'user': { 'id':", "True for url in entities['urls']: tweet.urls.add(get_or_create_url(url)) # media if entities.get('media') and len(entities['media']) >", "get_or_create_timezone(name): zone, created = Timezone.objects.get_or_create(name=name) return zone def 
get_or_create_messagetype(name): mtype, created = MessageType.objects.get_or_create(name=name)", "domain=domain, short_url=urlblob['url']) return url def get_or_create_media(mediablob): media, created = Media.objects.get_or_create(type=mediablob['type'], media_url=mediablob['media_url']) return media", "= get_or_create_messagetype('tweet') if tweet_data.get('entities'): handle_entities(tweet, tweet_data.get('entities'), dataset_obj) # sentiment set_message_sentiment(tweet, save=False) tweet.save() return", "tweet.contains_media = True for me in entities['media']: tweet.media.add(get_or_create_media(me)) # user_mentions if entities.get('user_mentions') and", "# language if tweet_data.get('lang'): tweet.language = get_or_create_language(tweet_data['lang']) if tweet_data.get('user'): # sender tweet.sender =", "'venue': source['venue'], 'link': source['link']}) question = Question(source=article, text=q['text']) question.save() for dim in q['dimensions']:", "tweet_data.get('lang'): lang = tweet_data.get('lang') if lang != \"en\": return False return get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj)", "tweet_data.get('retweeted_status') is not None: tweet.type = get_or_create_messagetype(\"retweet\") handle_retweet(tweet_data['retweeted_status'], dataset_obj) elif tweet_data.get('in_reply_to_status_id') is not", "short_url=urlblob['url']) return url def get_or_create_media(mediablob): media, created = Media.objects.get_or_create(type=mediablob['type'], media_url=mediablob['media_url']) return media def", "django.utils.timezone import utc from urlparse import urlparse import json from datetime import datetime", "sender.username = user_data['screen_name'] if user_data.get('name'): sender.full_name = user_data['name'] if user_data.get('lang'): sender.language = Language.objects.get_or_create(code=user_data['lang'])[0]", "user_data.get('profile_image_url'): sender.profile_image_url = user_data['profile_image_url'] sender.save() return sender def 
create_an_instance_from_json(json_str, dataset_obj): \"\"\" Given a", "# media if entities.get('media') and len(entities['media']) > 0: tweet.contains_media = True for me", "for mention in entities['user_mentions']: mention_obj = create_an_user_from_json_obj(mention, dataset_obj) mention_obj.mentioned_count += 1 mention_obj.save() tweet.mentions.add(mention_obj)", "tweet.type = get_or_create_messagetype(\"reply\") handle_reply_to(status_id=tweet_data['in_reply_to_status_id'], user_id=tweet_data['in_reply_to_user_id'], screen_name=tweet_data['in_reply_to_screen_name'], dataset_obj=dataset_obj) else: tweet.type = get_or_create_messagetype('tweet') if tweet_data.get('entities'):", "Given a dataset object, imports a tweet from json string into the dataset.", "def load_research_questions_from_json(json_str): \"\"\" Load research questions from json string \"\"\" questions = json.loads(json_str)", "dataset_obj) # time_zone if tweet_data['user'].get('time_zone'): tweet.timezone = get_or_create_timezone(tweet_data['user']['time_zone']) # type if tweet_data.get('retweeted_status') is", "\"\"\" Given a dataset object, imports a tweet from json string into the", "= create_an_user_from_json_obj(tweet_data['user'], dataset_obj) # time_zone if tweet_data['user'].get('time_zone'): tweet.timezone = get_or_create_timezone(tweet_data['user']['time_zone']) # type if", "and len(entities['hashtags']) > 0: tweet.contains_hashtag = True for hashtag in entities['hashtags']: tweet.hashtags.add(get_or_create_hashtag(hashtag)) #", "} original_tweet = get_or_create_a_tweet_from_json_obj(tmp_tweet, dataset_obj) if original_tweet is not None: original_tweet.replied_to_count += 1", "json string into the dataset. 
\"\"\" tweet_data = json.loads(json_str) if tweet_data.get('lang'): lang =", "update original tweet shared_count tmp_tweet = { 'id': status_id, 'user': { 'id': user_id,", "urlparse_results.netloc url, created = Url.objects.get_or_create(full_url=urlblob['expanded_url'], domain=domain, short_url=urlblob['url']) return url def get_or_create_media(mediablob): media, created", "= json.loads(json_str) for q in questions: source = q['source'] article, created = Article.objects.get_or_create(title=source['title'],", "user_id, screen_name, dataset_obj): # update original tweet shared_count tmp_tweet = { 'id': status_id,", "True for hashtag in entities['hashtags']: tweet.hashtags.add(get_or_create_hashtag(hashtag)) # urls if entities.get('urls') and len(entities['urls']) >", "set_message_sentiment def create_an_user_from_json_obj(user_data, dataset_obj): sender, created = Person.objects.get_or_create(dataset=dataset_obj, original_id=user_data['id']) if user_data.get('screen_name'): sender.username =", "tweet.contains_url = True for url in entities['urls']: tweet.urls.add(get_or_create_url(url)) # media if entities.get('media') and", "handle_retweet(tweet_data['retweeted_status'], dataset_obj) elif tweet_data.get('in_reply_to_status_id') is not None: tweet.type = get_or_create_messagetype(\"reply\") handle_reply_to(status_id=tweet_data['in_reply_to_status_id'], user_id=tweet_data['in_reply_to_user_id'], screen_name=tweet_data['in_reply_to_screen_name'],", "0: tweet.contains_url = True for url in entities['urls']: tweet.urls.add(get_or_create_url(url)) # media if entities.get('media')", "imports a tweet from json string into the dataset. \"\"\" tweet_data = json.loads(json_str)", "string into the dataset. 
\"\"\" tweet_data = json.loads(json_str) if tweet_data.get('lang'): lang = tweet_data.get('lang')", "user_data.get('statuses_count'): sender.message_count = user_data['statuses_count'] if user_data.get('profile_image_url'): sender.profile_image_url = user_data['profile_image_url'] sender.save() return sender def", "user_mentions if entities.get('user_mentions') and len(entities['user_mentions']) > 0: tweet.contains_mention = True for mention in", "the dataset. \"\"\" if 'in_reply_to_status_id' not in tweet_data: return None # if tweet_data.get('lang')", "mtype, created = MessageType.objects.get_or_create(name=name) return mtype def get_or_create_hashtag(hashtagblob): ht, created = Hashtag.objects.get_or_create(text=hashtagblob['text']) return", "original_tweet.save() original_tweet.sender.replied_to_count += 1 original_tweet.sender.save() def handle_retweet(retweeted_status, dataset_obj): # update original tweet shared_count", "= Timezone.objects.get_or_create(name=name) return zone def get_or_create_messagetype(name): mtype, created = MessageType.objects.get_or_create(name=name) return mtype def", "True for mention in entities['user_mentions']: mention_obj = create_an_user_from_json_obj(mention, dataset_obj) mention_obj.mentioned_count += 1 mention_obj.save()", "original_tweet.sender.replied_to_count += 1 original_tweet.sender.save() def handle_retweet(retweeted_status, dataset_obj): # update original tweet shared_count original_tweet", "return None tweet, created = Message.objects.get_or_create(dataset=dataset_obj, original_id=tweet_data['id']) # text if tweet_data.get('text'): tweet.text =", "tweet.language = get_or_create_language(tweet_data['lang']) if tweet_data.get('user'): # sender tweet.sender = create_an_user_from_json_obj(tweet_data['user'], dataset_obj) # time_zone", "sentiment set_message_sentiment(tweet, save=False) tweet.save() return tweet def load_research_questions_from_json(json_str): \"\"\" Load research questions from", "return media def 
handle_reply_to(status_id, user_id, screen_name, dataset_obj): # update original tweet shared_count tmp_tweet", "import urlparse import json from datetime import datetime from email.utils import parsedate from", "handle_entities(tweet, tweet_data.get('entities'), dataset_obj) # sentiment set_message_sentiment(tweet, save=False) tweet.save() return tweet def load_research_questions_from_json(json_str): \"\"\"", "screen_name=tweet_data['in_reply_to_screen_name'], dataset_obj=dataset_obj) else: tweet.type = get_or_create_messagetype('tweet') if tweet_data.get('entities'): handle_entities(tweet, tweet_data.get('entities'), dataset_obj) # sentiment", "created = Language.objects.get_or_create(code=code) return lang def get_or_create_timezone(name): zone, created = Timezone.objects.get_or_create(name=name) return zone", "original_tweet.shared_count += 1 original_tweet.save() original_tweet.sender.shared_count += 1 original_tweet.sender.save() def handle_entities(tweet, entities, dataset_obj): #", "= Media.objects.get_or_create(type=mediablob['type'], media_url=mediablob['media_url']) return media def handle_reply_to(status_id, user_id, screen_name, dataset_obj): # update original", "user_data['profile_image_url'] sender.save() return sender def create_an_instance_from_json(json_str, dataset_obj): \"\"\" Given a dataset object, imports", "return tweet def load_research_questions_from_json(json_str): \"\"\" Load research questions from json string \"\"\" questions", "dataset object, imports a tweet from json string into the dataset. \"\"\" tweet_data", "tweet from json string into the dataset. 
\"\"\" tweet_data = json.loads(json_str) if tweet_data.get('lang'):", "ht, created = Hashtag.objects.get_or_create(text=hashtagblob['text']) return ht def get_or_create_url(urlblob): urlparse_results = urlparse(urlblob['expanded_url']) domain =", "lang = tweet_data.get('lang') if lang != \"en\": return False return get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj) def", "tweet.sender = create_an_user_from_json_obj(tweet_data['user'], dataset_obj) # time_zone if tweet_data['user'].get('time_zone'): tweet.timezone = get_or_create_timezone(tweet_data['user']['time_zone']) # type", "get_or_create_messagetype(name): mtype, created = MessageType.objects.get_or_create(name=name) return mtype def get_or_create_hashtag(hashtagblob): ht, created = Hashtag.objects.get_or_create(text=hashtagblob['text'])", "= tweet_data['text'] # created_at if tweet_data.get('created_at'): tweet.time = datetime(*(parsedate(tweet_data['created_at']))[:6], tzinfo=utc) # language if", "'user': { 'id': user_id, 'screen_name': screen_name, }, 'in_reply_to_status_id': None } original_tweet = get_or_create_a_tweet_from_json_obj(tmp_tweet,", "= get_or_create_language(tweet_data['lang']) if tweet_data.get('user'): # sender tweet.sender = create_an_user_from_json_obj(tweet_data['user'], dataset_obj) # time_zone if", "mention_obj.save() tweet.mentions.add(mention_obj) def get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj): \"\"\" Given a dataset object, imports a tweet", "json from datetime import datetime from email.utils import parsedate from msgvis.apps.questions.models import Article,", "user_data['name'] if user_data.get('lang'): sender.language = Language.objects.get_or_create(code=user_data['lang'])[0] if user_data.get('friends_count'): sender.friend_count = user_data['friends_count'] if user_data.get('followers_count'):", "if tweet_data.get('lang'): tweet.language = get_or_create_language(tweet_data['lang']) if tweet_data.get('user'): # sender tweet.sender = 
create_an_user_from_json_obj(tweet_data['user'], dataset_obj)", "tweet_data.get('created_at'): tweet.time = datetime(*(parsedate(tweet_data['created_at']))[:6], tzinfo=utc) # language if tweet_data.get('lang'): tweet.language = get_or_create_language(tweet_data['lang']) if", "= q['source'] article, created = Article.objects.get_or_create(title=source['title'], defaults={'authors': source['authors'], 'year': source['year'], 'venue': source['venue'], 'link':", "create_an_user_from_json_obj(user_data, dataset_obj): sender, created = Person.objects.get_or_create(dataset=dataset_obj, original_id=user_data['id']) if user_data.get('screen_name'): sender.username = user_data['screen_name'] if", "elif tweet_data.get('in_reply_to_status_id') is not None: tweet.type = get_or_create_messagetype(\"reply\") handle_reply_to(status_id=tweet_data['in_reply_to_status_id'], user_id=tweet_data['in_reply_to_user_id'], screen_name=tweet_data['in_reply_to_screen_name'], dataset_obj=dataset_obj) else:", "+= 1 original_tweet.save() original_tweet.sender.shared_count += 1 original_tweet.sender.save() def handle_entities(tweet, entities, dataset_obj): # hashtags", "if entities.get('media') and len(entities['media']) > 0: tweet.contains_media = True for me in entities['media']:", "sender.friend_count = user_data['friends_count'] if user_data.get('followers_count'): sender.follower_count = user_data['followers_count'] if user_data.get('statuses_count'): sender.message_count = user_data['statuses_count']", "= Url.objects.get_or_create(full_url=urlblob['expanded_url'], domain=domain, short_url=urlblob['url']) return url def get_or_create_media(mediablob): media, created = Media.objects.get_or_create(type=mediablob['type'], media_url=mediablob['media_url'])" ]
[ "= True \"\"\" sleep_configspec = \"\"\" end = absolute_time(default=None) total = relative_time(default=None) interval", "\"\"\" [[SLEEP]] # to allow the section names to be arbitrary # the", "for the end-time (11-12-2013 8:45 pm) # 'total' should be a timestamp for", "'end' TOTAL_OPTION = 'total' INTERVAL_OPTION = 'interval' VERBOSE_OPTION = 'verbose' configuration = \"\"\"", "'SLEEP' END_OPTION = 'end' TOTAL_OPTION = 'total' INTERVAL_OPTION = 'interval' VERBOSE_OPTION = 'verbose'", "be used end = <absolute time> total = <relative time> interval = 1", "(default = 1 second). Use the same formatting as the `total` option. {bold}verbose{reset}", "if self._sections is None: self._sections = sections return self._sections @property def product(self): \"\"\"", "startup # only one of absolute or relative time is required, although both", "@property def subsection(self): \"\"\" the plugin sub-section \"\"\" if self._subsection is None: configspec", "be <amount> <units> (1 minute) # if verbose is False, sceen output will", "sections = OrderedDict() sections['name'] = '{bold}sleep{reset} -- a countdown timer that blocks until", "for Sleep \"\"\" super(Sleep, self).__init__(*args, **kwargs) self._subsection = None return @property def subsection(self):", "of the APE. At this point all calls to sleep will get the", "import ConfigObj # this package from theape import BasePlugin from theape.parts.sleep.sleep import TheBigSleep", "is False, sceen output will be off except at startup # only one", "same formatting as the `total` option. 
{bold}verbose{reset} : If True (the default) then", "TheBigSleep \"\"\" def __init__(self, *args, **kwargs): \"\"\" Constructor for Sleep \"\"\" super(Sleep, self).__init__(*args,", "be off except at startup # only one of absolute or relative time", "= 'ape' class Sleep(BasePlugin): \"\"\" A plugin for TheBigSleep \"\"\" def __init__(self, *args,", "# the plugin names are required plugin = Sleep # 'end' should be", "sleep will get the same configuration.' sections['configuration'] = configuration sections['see also'] = 'EventTimer,", "that can be interpreted by `dateutil.parser.parse`. This is for the cases where you", "'total' INTERVAL_OPTION = 'interval' VERBOSE_OPTION = 'verbose' configuration = \"\"\" [[SLEEP]] # to", "absolute_time(default=None) total = relative_time(default=None) interval = relative_time(default=1) verbose = boolean(default=True) \"\"\" sections =", "= \"\"\" end = absolute_time(default=None) total = relative_time(default=None) interval = relative_time(default=1) verbose =", "ConfigObj # this package from theape import BasePlugin from theape.parts.sleep.sleep import TheBigSleep from", "be arbitrary # the plugin names are required plugin = Sleep # 'end'", "import BasePlugin from theape.parts.sleep.sleep import TheBigSleep from theape.infrastructure.timemap import time_validator SLEEP_SECTION = 'SLEEP'", "'ape' class Sleep(BasePlugin): \"\"\" A plugin for TheBigSleep \"\"\" def __init__(self, *args, **kwargs):", "same configuration.' sections['configuration'] = configuration sections['see also'] = 'EventTimer, RelativeTime, AbsoluteTime' sections['options'] =", "the sleep runs. One of {bold}end{reset} or {bold}total{reset} needs to be specified. Everything", "where you have a specific time that you want the sleep to end.", "pairs of '<amount> <units>' -- e.g. '3.4 hours'. 
Most units only use the", "are required plugin = Sleep # 'end' should be a timestamp for the", "__init__(self, *args, **kwargs): \"\"\" Constructor for Sleep \"\"\" super(Sleep, self).__init__(*args, **kwargs) self._subsection =", "the end-time (11-12-2013 8:45 pm) # 'total' should be a timestamp for the", "e.g. '3.4 hours'. Most units only use the first letter, but since `months`", "the insertion of a pause in the execution of the APE. At this", "self._subsection = section return self._subsection def fetch_config(self): \"\"\" prints a config-file sample \"\"\"", "letter, but since `months` and `minutes` both start with `m`, you have to", "this package from theape import BasePlugin from theape.parts.sleep.sleep import TheBigSleep from theape.infrastructure.timemap import", "self.subsection[INTERVAL_OPTION] if interval != 1: interval = interval.total_seconds() verbose = self.subsection[VERBOSE_OPTION] self._product =", "ConfigObj(sleep_configspec.splitlines(), list_values=False, _inspec=True) section = ConfigObj(self.configuration[self.section_header], configspec=configspec) section.validate(time_validator) self._subsection = section return self._subsection", "# to allow the section names to be arbitrary # the plugin names", "absolute or relative time is required, although both can be used end =", "is optional. \"\"\" sections['author'] = 'ape' class Sleep(BasePlugin): \"\"\" A plugin for TheBigSleep", "\"\"\" prints a config-file sample \"\"\" print(configuration) @property def sections(self): \"\"\" Help dictionary", "only one of absolute or relative time is required, although both can be", "configspec = ConfigObj(sleep_configspec.splitlines(), list_values=False, _inspec=True) section = ConfigObj(self.configuration[self.section_header], configspec=configspec) section.validate(time_validator) self._subsection = section", "True (the default) then report time remaining at specified intervals while the sleep", "in the execution of the APE. 
At this point all calls to sleep", "relative_time(default=1) verbose = boolean(default=True) \"\"\" sections = OrderedDict() sections['name'] = '{bold}sleep{reset} -- a", "a timestamp for the end-time (11-12-2013 8:45 pm) # 'total' should be a", "is None: self._sections = sections return self._sections @property def product(self): \"\"\" A built", "TOTAL_OPTION = 'total' INTERVAL_OPTION = 'interval' VERBOSE_OPTION = 'verbose' configuration = \"\"\" [[SLEEP]]", "sleep_configspec = \"\"\" end = absolute_time(default=None) total = relative_time(default=None) interval = relative_time(default=1) verbose", "the sleep to end. {bold}total{reset} : a relative time given as pairs of", "specify them. The sleep will stop at the start of the sleep +", "timer that blocks until time is over' sections['description'] = '{bold}sleep{reset} is a verbose", "OrderedDict # third party from configobj import ConfigObj # this package from theape", "= sections return self._sections @property def product(self): \"\"\" A built TheBigSleep object :return:", "a relative time given as pairs of '<amount> <units>' -- e.g. '3.4 hours'.", "at startup # only one of absolute or relative time is required, although", "a specific time that you want the sleep to end. {bold}total{reset} : a", "end = self.subsection[END_OPTION] total = self.subsection[TOTAL_OPTION] interval = self.subsection[INTERVAL_OPTION] if interval != 1:", "the start of the sleep + the total time given. 
{bold}interval{reset} : The", "end-time (11-12-2013 8:45 pm) # 'total' should be a timestamp for the run-time", "countdown timer that blocks until time is over' sections['description'] = '{bold}sleep{reset} is a", "Help dictionary \"\"\" if self._sections is None: self._sections = sections return self._sections @property", "= boolean(default=True) \"\"\" sections = OrderedDict() sections['name'] = '{bold}sleep{reset} -- a countdown timer", "= '{bold}sleep{reset} -- a countdown timer that blocks until time is over' sections['description']", "boolean(default=True) \"\"\" sections = OrderedDict() sections['name'] = '{bold}sleep{reset} -- a countdown timer that", "sub-section \"\"\" if self._subsection is None: configspec = ConfigObj(sleep_configspec.splitlines(), list_values=False, _inspec=True) section =", "specific time that you want the sleep to end. {bold}total{reset} : a relative", "units only use the first letter, but since `months` and `minutes` both start", "SLEEP_SECTION = 'SLEEP' END_OPTION = 'end' TOTAL_OPTION = 'total' INTERVAL_OPTION = 'interval' VERBOSE_OPTION", "'<amount> <units>' -- e.g. '3.4 hours'. Most units only use the first letter,", "the `total` option. {bold}verbose{reset} : If True (the default) then report time remaining", "absolute time given as a time-stamp that can be interpreted by `dateutil.parser.parse`. This", "you have to use two letters to specify them. The sleep will stop", "end. 
{bold}total{reset} : a relative time given as pairs of '<amount> <units>' --", "def product(self): \"\"\" A built TheBigSleep object :return: TheBigSleep \"\"\" if self._product is", "= 'EventTimer, RelativeTime, AbsoluteTime' sections['options'] = \"\"\" The configuration options -- {bold}end{reset} :", "will be off except at startup # only one of absolute or relative", "that blocks until time is over' sections['description'] = '{bold}sleep{reset} is a verbose no-op", "be a timestamp for the run-time (1 hr 23 minutes) # 'interval' should", "to be specified. Everything else is optional. \"\"\" sections['author'] = 'ape' class Sleep(BasePlugin):", "of '<amount> <units>' -- e.g. '3.4 hours'. Most units only use the first", "allow the insertion of a pause in the execution of the APE. At", "you want the sleep to end. {bold}total{reset} : a relative time given as", "total = relative_time(default=None) interval = relative_time(default=1) verbose = boolean(default=True) \"\"\" sections = OrderedDict()", "is over' sections['description'] = '{bold}sleep{reset} is a verbose no-op (by default) meant to", "time remaining at specified intervals while the sleep runs. One of {bold}end{reset} or", "if interval != 1: interval = interval.total_seconds() verbose = self.subsection[VERBOSE_OPTION] self._product = TheBigSleep(end=end,", "time given as pairs of '<amount> <units>' -- e.g. '3.4 hours'. Most units", "time> interval = 1 second verbose = True \"\"\" sleep_configspec = \"\"\" end", "configuration = \"\"\" [[SLEEP]] # to allow the section names to be arbitrary", "all calls to sleep will get the same configuration.' sections['configuration'] = configuration sections['see", ": a relative time given as pairs of '<amount> <units>' -- e.g. '3.4", "import time_validator SLEEP_SECTION = 'SLEEP' END_OPTION = 'end' TOTAL_OPTION = 'total' INTERVAL_OPTION =", "total time given. 
{bold}interval{reset} : The amount of time beween reports of the", "sections return self._sections @property def product(self): \"\"\" A built TheBigSleep object :return: TheBigSleep", "Most units only use the first letter, but since `months` and `minutes` both", "**kwargs) self._subsection = None return @property def subsection(self): \"\"\" the plugin sub-section \"\"\"", "optional. \"\"\" sections['author'] = 'ape' class Sleep(BasePlugin): \"\"\" A plugin for TheBigSleep \"\"\"", "then report time remaining at specified intervals while the sleep runs. One of", "import OrderedDict # third party from configobj import ConfigObj # this package from", "to allow the section names to be arbitrary # the plugin names are", "configuration.' sections['configuration'] = configuration sections['see also'] = 'EventTimer, RelativeTime, AbsoluteTime' sections['options'] = \"\"\"", "= 'total' INTERVAL_OPTION = 'interval' VERBOSE_OPTION = 'verbose' configuration = \"\"\" [[SLEEP]] #", "by `dateutil.parser.parse`. This is for the cases where you have a specific time", "the same formatting as the `total` option. {bold}verbose{reset} : If True (the default)", "if self._product is None: end = self.subsection[END_OPTION] total = self.subsection[TOTAL_OPTION] interval = self.subsection[INTERVAL_OPTION]", "time that you want the sleep to end. {bold}total{reset} : a relative time", "for TheBigSleep \"\"\" def __init__(self, *args, **kwargs): \"\"\" Constructor for Sleep \"\"\" super(Sleep,", "sleep + the total time given. {bold}interval{reset} : The amount of time beween", "while the sleep runs. One of {bold}end{reset} or {bold}total{reset} needs to be specified.", "sceen output will be off except at startup # only one of absolute", "the cases where you have a specific time that you want the sleep", "sections['name'] = '{bold}sleep{reset} -- a countdown timer that blocks until time is over'", "time-stamp that can be interpreted by `dateutil.parser.parse`. 
This is for the cases where", "be interpreted by `dateutil.parser.parse`. This is for the cases where you have a", "except at startup # only one of absolute or relative time is required,", "can be used end = <absolute time> total = <relative time> interval =", "= interval.total_seconds() verbose = self.subsection[VERBOSE_OPTION] self._product = TheBigSleep(end=end, total=total, interval=interval, verbose=verbose) return self._product", "section.validate(time_validator) self._subsection = section return self._subsection def fetch_config(self): \"\"\" prints a config-file sample", "built TheBigSleep object :return: TheBigSleep \"\"\" if self._product is None: end = self.subsection[END_OPTION]", "should be <amount> <units> (1 minute) # if verbose is False, sceen output", "(1 minute) # if verbose is False, sceen output will be off except", "time beween reports of the time remaining (default = 1 second). Use the", "`total` option. {bold}verbose{reset} : If True (the default) then report time remaining at", "8:45 pm) # 'total' should be a timestamp for the run-time (1 hr", "{bold}end{reset} : an absolute time given as a time-stamp that can be interpreted", "def fetch_config(self): \"\"\" prints a config-file sample \"\"\" print(configuration) @property def sections(self): \"\"\"", "cases where you have a specific time that you want the sleep to", "beween reports of the time remaining (default = 1 second). Use the same", "for the run-time (1 hr 23 minutes) # 'interval' should be <amount> <units>", "{bold}total{reset} : a relative time given as pairs of '<amount> <units>' -- e.g.", "options -- {bold}end{reset} : an absolute time given as a time-stamp that can", "two letters to specify them. The sleep will stop at the start of", "the sleep + the total time given. {bold}interval{reset} : The amount of time", "intervals while the sleep runs. 
One of {bold}end{reset} or {bold}total{reset} needs to be", "second verbose = True \"\"\" sleep_configspec = \"\"\" end = absolute_time(default=None) total =", "# only one of absolute or relative time is required, although both can", "VERBOSE_OPTION = 'verbose' configuration = \"\"\" [[SLEEP]] # to allow the section names", "OrderedDict() sections['name'] = '{bold}sleep{reset} -- a countdown timer that blocks until time is", "\"\"\" if self._subsection is None: configspec = ConfigObj(sleep_configspec.splitlines(), list_values=False, _inspec=True) section = ConfigObj(self.configuration[self.section_header],", "interval = interval.total_seconds() verbose = self.subsection[VERBOSE_OPTION] self._product = TheBigSleep(end=end, total=total, interval=interval, verbose=verbose) return", "<amount> <units> (1 minute) # if verbose is False, sceen output will be", "both can be used end = <absolute time> total = <relative time> interval", "= ConfigObj(sleep_configspec.splitlines(), list_values=False, _inspec=True) section = ConfigObj(self.configuration[self.section_header], configspec=configspec) section.validate(time_validator) self._subsection = section return", "<relative time> interval = 1 second verbose = True \"\"\" sleep_configspec = \"\"\"", "One of {bold}end{reset} or {bold}total{reset} needs to be specified. Everything else is optional.", "= self.subsection[TOTAL_OPTION] interval = self.subsection[INTERVAL_OPTION] if interval != 1: interval = interval.total_seconds() verbose", "them. The sleep will stop at the start of the sleep + the", "should be a timestamp for the end-time (11-12-2013 8:45 pm) # 'total' should", "`minutes` both start with `m`, you have to use two letters to specify", "self).__init__(*args, **kwargs) self._subsection = None return @property def subsection(self): \"\"\" the plugin sub-section", "have to use two letters to specify them. The sleep will stop at", "1 second). Use the same formatting as the `total` option. 
{bold}verbose{reset} : If", "use the first letter, but since `months` and `minutes` both start with `m`,", "names to be arbitrary # the plugin names are required plugin = Sleep", "the first letter, but since `months` and `minutes` both start with `m`, you", "'EventTimer, RelativeTime, AbsoluteTime' sections['options'] = \"\"\" The configuration options -- {bold}end{reset} : an", "self._subsection = None return @property def subsection(self): \"\"\" the plugin sub-section \"\"\" if", "the APE. At this point all calls to sleep will get the same", ": an absolute time given as a time-stamp that can be interpreted by", "self._sections = sections return self._sections @property def product(self): \"\"\" A built TheBigSleep object", "-- e.g. '3.4 hours'. Most units only use the first letter, but since", "interval != 1: interval = interval.total_seconds() verbose = self.subsection[VERBOSE_OPTION] self._product = TheBigSleep(end=end, total=total,", "(1 hr 23 minutes) # 'interval' should be <amount> <units> (1 minute) #", "END_OPTION = 'end' TOTAL_OPTION = 'total' INTERVAL_OPTION = 'interval' VERBOSE_OPTION = 'verbose' configuration", "A built TheBigSleep object :return: TheBigSleep \"\"\" if self._product is None: end =", "this point all calls to sleep will get the same configuration.' sections['configuration'] =", "a pause in the execution of the APE. At this point all calls", "def __init__(self, *args, **kwargs): \"\"\" Constructor for Sleep \"\"\" super(Sleep, self).__init__(*args, **kwargs) self._subsection", "third party from configobj import ConfigObj # this package from theape import BasePlugin", "TheBigSleep from theape.infrastructure.timemap import time_validator SLEEP_SECTION = 'SLEEP' END_OPTION = 'end' TOTAL_OPTION =", "\"\"\" Constructor for Sleep \"\"\" super(Sleep, self).__init__(*args, **kwargs) self._subsection = None return @property", "want the sleep to end. 
{bold}total{reset} : a relative time given as pairs", "1: interval = interval.total_seconds() verbose = self.subsection[VERBOSE_OPTION] self._product = TheBigSleep(end=end, total=total, interval=interval, verbose=verbose)", "**kwargs): \"\"\" Constructor for Sleep \"\"\" super(Sleep, self).__init__(*args, **kwargs) self._subsection = None return", "This is for the cases where you have a specific time that you", "also'] = 'EventTimer, RelativeTime, AbsoluteTime' sections['options'] = \"\"\" The configuration options -- {bold}end{reset}", "second). Use the same formatting as the `total` option. {bold}verbose{reset} : If True", "= 'interval' VERBOSE_OPTION = 'verbose' configuration = \"\"\" [[SLEEP]] # to allow the", "None: configspec = ConfigObj(sleep_configspec.splitlines(), list_values=False, _inspec=True) section = ConfigObj(self.configuration[self.section_header], configspec=configspec) section.validate(time_validator) self._subsection =", "print(configuration) @property def sections(self): \"\"\" Help dictionary \"\"\" if self._sections is None: self._sections", "output will be off except at startup # only one of absolute or", "pause in the execution of the APE. At this point all calls to", "a time-stamp that can be interpreted by `dateutil.parser.parse`. This is for the cases", "an absolute time given as a time-stamp that can be interpreted by `dateutil.parser.parse`.", "return self._subsection def fetch_config(self): \"\"\" prints a config-file sample \"\"\" print(configuration) @property def", "super(Sleep, self).__init__(*args, **kwargs) self._subsection = None return @property def subsection(self): \"\"\" the plugin", "time given as a time-stamp that can be interpreted by `dateutil.parser.parse`. This is", "sections['see also'] = 'EventTimer, RelativeTime, AbsoluteTime' sections['options'] = \"\"\" The configuration options --", "reports of the time remaining (default = 1 second). Use the same formatting", "at specified intervals while the sleep runs. 
One of {bold}end{reset} or {bold}total{reset} needs", "self.subsection[TOTAL_OPTION] interval = self.subsection[INTERVAL_OPTION] if interval != 1: interval = interval.total_seconds() verbose =", "!= 1: interval = interval.total_seconds() verbose = self.subsection[VERBOSE_OPTION] self._product = TheBigSleep(end=end, total=total, interval=interval,", "None: self._sections = sections return self._sections @property def product(self): \"\"\" A built TheBigSleep", "-- a countdown timer that blocks until time is over' sections['description'] = '{bold}sleep{reset}", "Use the same formatting as the `total` option. {bold}verbose{reset} : If True (the", "given as pairs of '<amount> <units>' -- e.g. '3.4 hours'. Most units only", "is a verbose no-op (by default) meant to allow the insertion of a", "both start with `m`, you have to use two letters to specify them.", "# python standard library from collections import OrderedDict # third party from configobj", "of a pause in the execution of the APE. At this point all", "relative time given as pairs of '<amount> <units>' -- e.g. '3.4 hours'. 
Most", "plugin for TheBigSleep \"\"\" def __init__(self, *args, **kwargs): \"\"\" Constructor for Sleep \"\"\"", "names are required plugin = Sleep # 'end' should be a timestamp for", "Sleep \"\"\" super(Sleep, self).__init__(*args, **kwargs) self._subsection = None return @property def subsection(self): \"\"\"", "configobj import ConfigObj # this package from theape import BasePlugin from theape.parts.sleep.sleep import", "23 minutes) # 'interval' should be <amount> <units> (1 minute) # if verbose", "or relative time is required, although both can be used end = <absolute", "= '{bold}sleep{reset} is a verbose no-op (by default) meant to allow the insertion", "theape.parts.sleep.sleep import TheBigSleep from theape.infrastructure.timemap import time_validator SLEEP_SECTION = 'SLEEP' END_OPTION = 'end'", "allow the section names to be arbitrary # the plugin names are required", "= configuration sections['see also'] = 'EventTimer, RelativeTime, AbsoluteTime' sections['options'] = \"\"\" The configuration", "# this package from theape import BasePlugin from theape.parts.sleep.sleep import TheBigSleep from theape.infrastructure.timemap", "the time remaining (default = 1 second). Use the same formatting as the", "\"\"\" sleep_configspec = \"\"\" end = absolute_time(default=None) total = relative_time(default=None) interval = relative_time(default=1)", "and `minutes` both start with `m`, you have to use two letters to", "= 1 second verbose = True \"\"\" sleep_configspec = \"\"\" end = absolute_time(default=None)", "The amount of time beween reports of the time remaining (default = 1", "since `months` and `minutes` both start with `m`, you have to use two", "should be a timestamp for the run-time (1 hr 23 minutes) # 'interval'", "will get the same configuration.' 
sections['configuration'] = configuration sections['see also'] = 'EventTimer, RelativeTime,", "= relative_time(default=None) interval = relative_time(default=1) verbose = boolean(default=True) \"\"\" sections = OrderedDict() sections['name']", "section return self._subsection def fetch_config(self): \"\"\" prints a config-file sample \"\"\" print(configuration) @property", "self._subsection is None: configspec = ConfigObj(sleep_configspec.splitlines(), list_values=False, _inspec=True) section = ConfigObj(self.configuration[self.section_header], configspec=configspec) section.validate(time_validator)", "`m`, you have to use two letters to specify them. The sleep will", "= 'verbose' configuration = \"\"\" [[SLEEP]] # to allow the section names to", "= 'end' TOTAL_OPTION = 'total' INTERVAL_OPTION = 'interval' VERBOSE_OPTION = 'verbose' configuration =", "{bold}total{reset} needs to be specified. Everything else is optional. \"\"\" sections['author'] = 'ape'", "option. {bold}verbose{reset} : If True (the default) then report time remaining at specified", "interval = relative_time(default=1) verbose = boolean(default=True) \"\"\" sections = OrderedDict() sections['name'] = '{bold}sleep{reset}", "sections['author'] = 'ape' class Sleep(BasePlugin): \"\"\" A plugin for TheBigSleep \"\"\" def __init__(self,", "report time remaining at specified intervals while the sleep runs. One of {bold}end{reset}", "theape.infrastructure.timemap import time_validator SLEEP_SECTION = 'SLEEP' END_OPTION = 'end' TOTAL_OPTION = 'total' INTERVAL_OPTION", "= self.subsection[END_OPTION] total = self.subsection[TOTAL_OPTION] interval = self.subsection[INTERVAL_OPTION] if interval != 1: interval", "interval = 1 second verbose = True \"\"\" sleep_configspec = \"\"\" end =", "sections['configuration'] = configuration sections['see also'] = 'EventTimer, RelativeTime, AbsoluteTime' sections['options'] = \"\"\" The", "can be interpreted by `dateutil.parser.parse`. 
This is for the cases where you have", "plugin sub-section \"\"\" if self._subsection is None: configspec = ConfigObj(sleep_configspec.splitlines(), list_values=False, _inspec=True) section", "'3.4 hours'. Most units only use the first letter, but since `months` and", "'end' should be a timestamp for the end-time (11-12-2013 8:45 pm) # 'total'", "as a time-stamp that can be interpreted by `dateutil.parser.parse`. This is for the", "point all calls to sleep will get the same configuration.' sections['configuration'] = configuration", "Constructor for Sleep \"\"\" super(Sleep, self).__init__(*args, **kwargs) self._subsection = None return @property def", "'{bold}sleep{reset} is a verbose no-op (by default) meant to allow the insertion of", "python standard library from collections import OrderedDict # third party from configobj import", "specified. Everything else is optional. \"\"\" sections['author'] = 'ape' class Sleep(BasePlugin): \"\"\" A", "if verbose is False, sceen output will be off except at startup #", "from theape.infrastructure.timemap import time_validator SLEEP_SECTION = 'SLEEP' END_OPTION = 'end' TOTAL_OPTION = 'total'", "= \"\"\" The configuration options -- {bold}end{reset} : an absolute time given as", "self.subsection[END_OPTION] total = self.subsection[TOTAL_OPTION] interval = self.subsection[INTERVAL_OPTION] if interval != 1: interval =", "= OrderedDict() sections['name'] = '{bold}sleep{reset} -- a countdown timer that blocks until time", "[[SLEEP]] # to allow the section names to be arbitrary # the plugin", "required plugin = Sleep # 'end' should be a timestamp for the end-time", "dictionary \"\"\" if self._sections is None: self._sections = sections return self._sections @property def", "time given. 
{bold}interval{reset} : The amount of time beween reports of the time", "verbose = boolean(default=True) \"\"\" sections = OrderedDict() sections['name'] = '{bold}sleep{reset} -- a countdown", "= ConfigObj(self.configuration[self.section_header], configspec=configspec) section.validate(time_validator) self._subsection = section return self._subsection def fetch_config(self): \"\"\" prints", "self._subsection def fetch_config(self): \"\"\" prints a config-file sample \"\"\" print(configuration) @property def sections(self):", "\"\"\" super(Sleep, self).__init__(*args, **kwargs) self._subsection = None return @property def subsection(self): \"\"\" the", "= section return self._subsection def fetch_config(self): \"\"\" prints a config-file sample \"\"\" print(configuration)", "from theape.parts.sleep.sleep import TheBigSleep from theape.infrastructure.timemap import time_validator SLEEP_SECTION = 'SLEEP' END_OPTION =", "prints a config-file sample \"\"\" print(configuration) @property def sections(self): \"\"\" Help dictionary \"\"\"", "\"\"\" sections['author'] = 'ape' class Sleep(BasePlugin): \"\"\" A plugin for TheBigSleep \"\"\" def", "timestamp for the run-time (1 hr 23 minutes) # 'interval' should be <amount>", "config-file sample \"\"\" print(configuration) @property def sections(self): \"\"\" Help dictionary \"\"\" if self._sections", "letters to specify them. 
The sleep will stop at the start of the", "a timestamp for the run-time (1 hr 23 minutes) # 'interval' should be", "# if verbose is False, sceen output will be off except at startup", "blocks until time is over' sections['description'] = '{bold}sleep{reset} is a verbose no-op (by", "@property def sections(self): \"\"\" Help dictionary \"\"\" if self._sections is None: self._sections =", "will stop at the start of the sleep + the total time given.", "over' sections['description'] = '{bold}sleep{reset} is a verbose no-op (by default) meant to allow", "of absolute or relative time is required, although both can be used end", "*args, **kwargs): \"\"\" Constructor for Sleep \"\"\" super(Sleep, self).__init__(*args, **kwargs) self._subsection = None", "meant to allow the insertion of a pause in the execution of the", "first letter, but since `months` and `minutes` both start with `m`, you have", "theape import BasePlugin from theape.parts.sleep.sleep import TheBigSleep from theape.infrastructure.timemap import time_validator SLEEP_SECTION =", "product(self): \"\"\" A built TheBigSleep object :return: TheBigSleep \"\"\" if self._product is None:", "= 1 second). Use the same formatting as the `total` option. {bold}verbose{reset} :", "is required, although both can be used end = <absolute time> total =", "calls to sleep will get the same configuration.' sections['configuration'] = configuration sections['see also']", "'{bold}sleep{reset} -- a countdown timer that blocks until time is over' sections['description'] =", "RelativeTime, AbsoluteTime' sections['options'] = \"\"\" The configuration options -- {bold}end{reset} : an absolute", "<absolute time> total = <relative time> interval = 1 second verbose = True", "the same configuration.' sections['configuration'] = configuration sections['see also'] = 'EventTimer, RelativeTime, AbsoluteTime' sections['options']", "remaining (default = 1 second). 
Use the same formatting as the `total` option.", "is None: end = self.subsection[END_OPTION] total = self.subsection[TOTAL_OPTION] interval = self.subsection[INTERVAL_OPTION] if interval", "interval = self.subsection[INTERVAL_OPTION] if interval != 1: interval = interval.total_seconds() verbose = self.subsection[VERBOSE_OPTION]", "`months` and `minutes` both start with `m`, you have to use two letters", "given. {bold}interval{reset} : The amount of time beween reports of the time remaining", "the section names to be arbitrary # the plugin names are required plugin", "sleep to end. {bold}total{reset} : a relative time given as pairs of '<amount>", "else is optional. \"\"\" sections['author'] = 'ape' class Sleep(BasePlugin): \"\"\" A plugin for", "1 second verbose = True \"\"\" sleep_configspec = \"\"\" end = absolute_time(default=None) total", "plugin = Sleep # 'end' should be a timestamp for the end-time (11-12-2013", "only use the first letter, but since `months` and `minutes` both start with", "'interval' VERBOSE_OPTION = 'verbose' configuration = \"\"\" [[SLEEP]] # to allow the section", "the plugin names are required plugin = Sleep # 'end' should be a", "default) meant to allow the insertion of a pause in the execution of", "although both can be used end = <absolute time> total = <relative time>", "-- {bold}end{reset} : an absolute time given as a time-stamp that can be", "stop at the start of the sleep + the total time given. {bold}interval{reset}", "execution of the APE. At this point all calls to sleep will get", ": If True (the default) then report time remaining at specified intervals while", "False, sceen output will be off except at startup # only one of", "interpreted by `dateutil.parser.parse`. This is for the cases where you have a specific", "of time beween reports of the time remaining (default = 1 second). 
Use", "time_validator SLEEP_SECTION = 'SLEEP' END_OPTION = 'end' TOTAL_OPTION = 'total' INTERVAL_OPTION = 'interval'", "timestamp for the end-time (11-12-2013 8:45 pm) # 'total' should be a timestamp", "to end. {bold}total{reset} : a relative time given as pairs of '<amount> <units>'", "configspec=configspec) section.validate(time_validator) self._subsection = section return self._subsection def fetch_config(self): \"\"\" prints a config-file", "as the `total` option. {bold}verbose{reset} : If True (the default) then report time", "minute) # if verbose is False, sceen output will be off except at", "time remaining (default = 1 second). Use the same formatting as the `total`", "'verbose' configuration = \"\"\" [[SLEEP]] # to allow the section names to be", "list_values=False, _inspec=True) section = ConfigObj(self.configuration[self.section_header], configspec=configspec) section.validate(time_validator) self._subsection = section return self._subsection def", "'total' should be a timestamp for the run-time (1 hr 23 minutes) #", "time> total = <relative time> interval = 1 second verbose = True \"\"\"", "used end = <absolute time> total = <relative time> interval = 1 second", "standard library from collections import OrderedDict # third party from configobj import ConfigObj", "configuration options -- {bold}end{reset} : an absolute time given as a time-stamp that", "remaining at specified intervals while the sleep runs. One of {bold}end{reset} or {bold}total{reset}", "of {bold}end{reset} or {bold}total{reset} needs to be specified. Everything else is optional. \"\"\"", "be specified. Everything else is optional. 
\"\"\" sections['author'] = 'ape' class Sleep(BasePlugin): \"\"\"", "<units> (1 minute) # if verbose is False, sceen output will be off", "\"\"\" if self._sections is None: self._sections = sections return self._sections @property def product(self):", "arbitrary # the plugin names are required plugin = Sleep # 'end' should", "class Sleep(BasePlugin): \"\"\" A plugin for TheBigSleep \"\"\" def __init__(self, *args, **kwargs): \"\"\"", "def sections(self): \"\"\" Help dictionary \"\"\" if self._sections is None: self._sections = sections", "the execution of the APE. At this point all calls to sleep will", "sections['description'] = '{bold}sleep{reset} is a verbose no-op (by default) meant to allow the", "def subsection(self): \"\"\" the plugin sub-section \"\"\" if self._subsection is None: configspec =", "pm) # 'total' should be a timestamp for the run-time (1 hr 23", "relative_time(default=None) interval = relative_time(default=1) verbose = boolean(default=True) \"\"\" sections = OrderedDict() sections['name'] =", "total = <relative time> interval = 1 second verbose = True \"\"\" sleep_configspec", "default) then report time remaining at specified intervals while the sleep runs. One", "sections['options'] = \"\"\" The configuration options -- {bold}end{reset} : an absolute time given", "end = <absolute time> total = <relative time> interval = 1 second verbose", "have a specific time that you want the sleep to end. {bold}total{reset} :", "\"\"\" A plugin for TheBigSleep \"\"\" def __init__(self, *args, **kwargs): \"\"\" Constructor for", "section names to be arbitrary # the plugin names are required plugin =", "library from collections import OrderedDict # third party from configobj import ConfigObj #", "given as a time-stamp that can be interpreted by `dateutil.parser.parse`. This is for", "\"\"\" end = absolute_time(default=None) total = relative_time(default=None) interval = relative_time(default=1) verbose = boolean(default=True)", "to specify them. 
The sleep will stop at the start of the sleep", "None: end = self.subsection[END_OPTION] total = self.subsection[TOTAL_OPTION] interval = self.subsection[INTERVAL_OPTION] if interval !=", "start with `m`, you have to use two letters to specify them. The", "required, although both can be used end = <absolute time> total = <relative", "\"\"\" if self._product is None: end = self.subsection[END_OPTION] total = self.subsection[TOTAL_OPTION] interval =", "but since `months` and `minutes` both start with `m`, you have to use", "(by default) meant to allow the insertion of a pause in the execution", "section = ConfigObj(self.configuration[self.section_header], configspec=configspec) section.validate(time_validator) self._subsection = section return self._subsection def fetch_config(self): \"\"\"", "hours'. Most units only use the first letter, but since `months` and `minutes`", "None return @property def subsection(self): \"\"\" the plugin sub-section \"\"\" if self._subsection is", "a countdown timer that blocks until time is over' sections['description'] = '{bold}sleep{reset} is", "`dateutil.parser.parse`. 
This is for the cases where you have a specific time that", "collections import OrderedDict # third party from configobj import ConfigObj # this package", "AbsoluteTime' sections['options'] = \"\"\" The configuration options -- {bold}end{reset} : an absolute time", "is None: configspec = ConfigObj(sleep_configspec.splitlines(), list_values=False, _inspec=True) section = ConfigObj(self.configuration[self.section_header], configspec=configspec) section.validate(time_validator) self._subsection", "relative time is required, although both can be used end = <absolute time>", "one of absolute or relative time is required, although both can be used", "self._sections @property def product(self): \"\"\" A built TheBigSleep object :return: TheBigSleep \"\"\" if", ":return: TheBigSleep \"\"\" if self._product is None: end = self.subsection[END_OPTION] total = self.subsection[TOTAL_OPTION]", "(11-12-2013 8:45 pm) # 'total' should be a timestamp for the run-time (1", "(the default) then report time remaining at specified intervals while the sleep runs.", "sleep runs. One of {bold}end{reset} or {bold}total{reset} needs to be specified. Everything else", "start of the sleep + the total time given. {bold}interval{reset} : The amount", "APE. At this point all calls to sleep will get the same configuration.'", "# third party from configobj import ConfigObj # this package from theape import", "for the cases where you have a specific time that you want the", "+ the total time given. 
{bold}interval{reset} : The amount of time beween reports", "TheBigSleep \"\"\" if self._product is None: end = self.subsection[END_OPTION] total = self.subsection[TOTAL_OPTION] interval", "until time is over' sections['description'] = '{bold}sleep{reset} is a verbose no-op (by default)", "a verbose no-op (by default) meant to allow the insertion of a pause", "\"\"\" sections = OrderedDict() sections['name'] = '{bold}sleep{reset} -- a countdown timer that blocks", "INTERVAL_OPTION = 'interval' VERBOSE_OPTION = 'verbose' configuration = \"\"\" [[SLEEP]] # to allow", "subsection(self): \"\"\" the plugin sub-section \"\"\" if self._subsection is None: configspec = ConfigObj(sleep_configspec.splitlines(),", "use two letters to specify them. The sleep will stop at the start", "if self._subsection is None: configspec = ConfigObj(sleep_configspec.splitlines(), list_values=False, _inspec=True) section = ConfigObj(self.configuration[self.section_header], configspec=configspec)", "Sleep(BasePlugin): \"\"\" A plugin for TheBigSleep \"\"\" def __init__(self, *args, **kwargs): \"\"\" Constructor", "be a timestamp for the end-time (11-12-2013 8:45 pm) # 'total' should be", "from configobj import ConfigObj # this package from theape import BasePlugin from theape.parts.sleep.sleep", "the run-time (1 hr 23 minutes) # 'interval' should be <amount> <units> (1", "@property def product(self): \"\"\" A built TheBigSleep object :return: TheBigSleep \"\"\" if self._product", "= <relative time> interval = 1 second verbose = True \"\"\" sleep_configspec =", "end = absolute_time(default=None) total = relative_time(default=None) interval = relative_time(default=1) verbose = boolean(default=True) \"\"\"", "runs. One of {bold}end{reset} or {bold}total{reset} needs to be specified. Everything else is", "The sleep will stop at the start of the sleep + the total", "get the same configuration.' 
sections['configuration'] = configuration sections['see also'] = 'EventTimer, RelativeTime, AbsoluteTime'", "sample \"\"\" print(configuration) @property def sections(self): \"\"\" Help dictionary \"\"\" if self._sections is", "\"\"\" print(configuration) @property def sections(self): \"\"\" Help dictionary \"\"\" if self._sections is None:", "self._product is None: end = self.subsection[END_OPTION] total = self.subsection[TOTAL_OPTION] interval = self.subsection[INTERVAL_OPTION] if", "time is over' sections['description'] = '{bold}sleep{reset} is a verbose no-op (by default) meant", "off except at startup # only one of absolute or relative time is", "of the time remaining (default = 1 second). Use the same formatting as", "\"\"\" Help dictionary \"\"\" if self._sections is None: self._sections = sections return self._sections", "= 'SLEEP' END_OPTION = 'end' TOTAL_OPTION = 'total' INTERVAL_OPTION = 'interval' VERBOSE_OPTION =", "you have a specific time that you want the sleep to end. {bold}total{reset}", "minutes) # 'interval' should be <amount> <units> (1 minute) # if verbose is", "configuration sections['see also'] = 'EventTimer, RelativeTime, AbsoluteTime' sections['options'] = \"\"\" The configuration options", "At this point all calls to sleep will get the same configuration.' sections['configuration']", "sleep will stop at the start of the sleep + the total time", "verbose is False, sceen output will be off except at startup # only", "Everything else is optional. \"\"\" sections['author'] = 'ape' class Sleep(BasePlugin): \"\"\" A plugin", "{bold}end{reset} or {bold}total{reset} needs to be specified. Everything else is optional. \"\"\" sections['author']", "the total time given. 
{bold}interval{reset} : The amount of time beween reports of", "= Sleep # 'end' should be a timestamp for the end-time (11-12-2013 8:45", "a config-file sample \"\"\" print(configuration) @property def sections(self): \"\"\" Help dictionary \"\"\" if", "total = self.subsection[TOTAL_OPTION] interval = self.subsection[INTERVAL_OPTION] if interval != 1: interval = interval.total_seconds()", "return self._sections @property def product(self): \"\"\" A built TheBigSleep object :return: TheBigSleep \"\"\"", "verbose no-op (by default) meant to allow the insertion of a pause in", "the plugin sub-section \"\"\" if self._subsection is None: configspec = ConfigObj(sleep_configspec.splitlines(), list_values=False, _inspec=True)", "needs to be specified. Everything else is optional. \"\"\" sections['author'] = 'ape' class", "import TheBigSleep from theape.infrastructure.timemap import time_validator SLEEP_SECTION = 'SLEEP' END_OPTION = 'end' TOTAL_OPTION", "or {bold}total{reset} needs to be specified. Everything else is optional. \"\"\" sections['author'] =", "{bold}interval{reset} : The amount of time beween reports of the time remaining (default", "from collections import OrderedDict # third party from configobj import ConfigObj # this", "insertion of a pause in the execution of the APE. At this point", "= None return @property def subsection(self): \"\"\" the plugin sub-section \"\"\" if self._subsection", "as pairs of '<amount> <units>' -- e.g. '3.4 hours'. 
Most units only use", "= relative_time(default=1) verbose = boolean(default=True) \"\"\" sections = OrderedDict() sections['name'] = '{bold}sleep{reset} --", "amount of time beween reports of the time remaining (default = 1 second).", "self._sections is None: self._sections = sections return self._sections @property def product(self): \"\"\" A", ": The amount of time beween reports of the time remaining (default =", "plugin names are required plugin = Sleep # 'end' should be a timestamp", "to sleep will get the same configuration.' sections['configuration'] = configuration sections['see also'] =", "return @property def subsection(self): \"\"\" the plugin sub-section \"\"\" if self._subsection is None:", "BasePlugin from theape.parts.sleep.sleep import TheBigSleep from theape.infrastructure.timemap import time_validator SLEEP_SECTION = 'SLEEP' END_OPTION", "specified intervals while the sleep runs. One of {bold}end{reset} or {bold}total{reset} needs to", "TheBigSleep object :return: TheBigSleep \"\"\" if self._product is None: end = self.subsection[END_OPTION] total", "sections(self): \"\"\" Help dictionary \"\"\" if self._sections is None: self._sections = sections return", "fetch_config(self): \"\"\" prints a config-file sample \"\"\" print(configuration) @property def sections(self): \"\"\" Help", "object :return: TheBigSleep \"\"\" if self._product is None: end = self.subsection[END_OPTION] total =", "_inspec=True) section = ConfigObj(self.configuration[self.section_header], configspec=configspec) section.validate(time_validator) self._subsection = section return self._subsection def fetch_config(self):", "<units>' -- e.g. '3.4 hours'. Most units only use the first letter, but", "at the start of the sleep + the total time given. 
{bold}interval{reset} :", "to be arbitrary # the plugin names are required plugin = Sleep #", "party from configobj import ConfigObj # this package from theape import BasePlugin from", "\"\"\" The configuration options -- {bold}end{reset} : an absolute time given as a", "\"\"\" def __init__(self, *args, **kwargs): \"\"\" Constructor for Sleep \"\"\" super(Sleep, self).__init__(*args, **kwargs)", "run-time (1 hr 23 minutes) # 'interval' should be <amount> <units> (1 minute)", "time is required, although both can be used end = <absolute time> total", "'interval' should be <amount> <units> (1 minute) # if verbose is False, sceen", "\"\"\" A built TheBigSleep object :return: TheBigSleep \"\"\" if self._product is None: end", "is for the cases where you have a specific time that you want", "If True (the default) then report time remaining at specified intervals while the", "The configuration options -- {bold}end{reset} : an absolute time given as a time-stamp", "from theape import BasePlugin from theape.parts.sleep.sleep import TheBigSleep from theape.infrastructure.timemap import time_validator SLEEP_SECTION", "ConfigObj(self.configuration[self.section_header], configspec=configspec) section.validate(time_validator) self._subsection = section return self._subsection def fetch_config(self): \"\"\" prints a", "verbose = True \"\"\" sleep_configspec = \"\"\" end = absolute_time(default=None) total = relative_time(default=None)", "of the sleep + the total time given. {bold}interval{reset} : The amount of", "A plugin for TheBigSleep \"\"\" def __init__(self, *args, **kwargs): \"\"\" Constructor for Sleep", "# 'end' should be a timestamp for the end-time (11-12-2013 8:45 pm) #", "Sleep # 'end' should be a timestamp for the end-time (11-12-2013 8:45 pm)", "with `m`, you have to use two letters to specify them. 
The sleep", "= absolute_time(default=None) total = relative_time(default=None) interval = relative_time(default=1) verbose = boolean(default=True) \"\"\" sections", "package from theape import BasePlugin from theape.parts.sleep.sleep import TheBigSleep from theape.infrastructure.timemap import time_validator", "# 'total' should be a timestamp for the run-time (1 hr 23 minutes)", "hr 23 minutes) # 'interval' should be <amount> <units> (1 minute) # if", "= \"\"\" [[SLEEP]] # to allow the section names to be arbitrary #", "# 'interval' should be <amount> <units> (1 minute) # if verbose is False,", "to allow the insertion of a pause in the execution of the APE.", "\"\"\" the plugin sub-section \"\"\" if self._subsection is None: configspec = ConfigObj(sleep_configspec.splitlines(), list_values=False,", "to use two letters to specify them. The sleep will stop at the", "True \"\"\" sleep_configspec = \"\"\" end = absolute_time(default=None) total = relative_time(default=None) interval =", "no-op (by default) meant to allow the insertion of a pause in the", "that you want the sleep to end. {bold}total{reset} : a relative time given", "{bold}verbose{reset} : If True (the default) then report time remaining at specified intervals", "formatting as the `total` option. {bold}verbose{reset} : If True (the default) then report", "= self.subsection[INTERVAL_OPTION] if interval != 1: interval = interval.total_seconds() verbose = self.subsection[VERBOSE_OPTION] self._product", "= <absolute time> total = <relative time> interval = 1 second verbose =" ]
[ "VerbBaseView(object): def get_verb_form(self, slug=None): if slug is None: slug = self.kwargs.get('slug') # convert", "slug is None: slug = self.kwargs.get('slug') # convert the slug into a form", "convert the slug into a form name form_name = \"\".join([x.title() for x in", "\"verbs/verb_detail.html\" def get_context_data(self, **kwargs): context = super(VerbDetailView, self).get_context_data(**kwargs) context[\"verb\"] = self.get_verb_form() return context", "return form class VerbDetailView(VerbBaseView, TemplateView): template_name = \"verbs/verb_detail.html\" def get_context_data(self, **kwargs): context =", "slug into a form name form_name = \"\".join([x.title() for x in slug.split('-')]) +", "context = super(VerbDetailView, self).get_context_data(**kwargs) context[\"verb\"] = self.get_verb_form() return context class VerbListView(TemplateView): template_name =", "form name form_name = \"\".join([x.title() for x in slug.split('-')]) + \"Form\" form =", "name form_name = \"\".join([x.title() for x in slug.split('-')]) + \"Form\" form = getattr(verb_forms,", "= len(VERB_LIST) / 4 context['verb_list1'] = VERB_LIST[:quarter] context['verb_list2'] = VERB_LIST[quarter:quarter * 2] context['verb_list3']", "= VERB_LIST[quarter * 2:quarter * 3] context['verb_list4'] = VERB_LIST[quarter * 3:] return context", "form class VerbDetailView(VerbBaseView, TemplateView): template_name = \"verbs/verb_detail.html\" def get_context_data(self, **kwargs): context = super(VerbDetailView,", "self).get_context_data(**kwargs) context[\"verb\"] = self.get_verb_form() return context class VerbListView(TemplateView): template_name = \"verbs/verb_list.html\" def get_context_data(self,", "context['verb_list2'] = VERB_LIST[quarter:quarter * 2] context['verb_list3'] = VERB_LIST[quarter * 2:quarter * 3] context['verb_list4']", "context class VerbListView(TemplateView): template_name = \"verbs/verb_list.html\" def get_context_data(self, **kwargs): context = super(VerbListView, 
self).get_context_data(**kwargs)", "if slug is None: slug = self.kwargs.get('slug') # convert the slug into a", "None: slug = self.kwargs.get('slug') # convert the slug into a form name form_name", "from django.views.generic import TemplateView from verbs import forms as verb_forms from verbs.utils import", "verbs.utils import VERB_LIST class VerbBaseView(object): def get_verb_form(self, slug=None): if slug is None: slug", "Http404 return form class VerbDetailView(VerbBaseView, TemplateView): template_name = \"verbs/verb_detail.html\" def get_context_data(self, **kwargs): context", "getattr(verb_forms, form_name, None) if form is None: raise Http404 return form class VerbDetailView(VerbBaseView,", "is None: raise Http404 return form class VerbDetailView(VerbBaseView, TemplateView): template_name = \"verbs/verb_detail.html\" def", "* 2] context['verb_list3'] = VERB_LIST[quarter * 2:quarter * 3] context['verb_list4'] = VERB_LIST[quarter *", "get_context_data(self, **kwargs): context = super(VerbListView, self).get_context_data(**kwargs) quarter = len(VERB_LIST) / 4 context['verb_list1'] =", "VERB_LIST class VerbBaseView(object): def get_verb_form(self, slug=None): if slug is None: slug = self.kwargs.get('slug')", "if form is None: raise Http404 return form class VerbDetailView(VerbBaseView, TemplateView): template_name =", "= getattr(verb_forms, form_name, None) if form is None: raise Http404 return form class", "template_name = \"verbs/verb_list.html\" def get_context_data(self, **kwargs): context = super(VerbListView, self).get_context_data(**kwargs) quarter = len(VERB_LIST)", "# convert the slug into a form name form_name = \"\".join([x.title() for x", "4 context['verb_list1'] = VERB_LIST[:quarter] context['verb_list2'] = VERB_LIST[quarter:quarter * 2] context['verb_list3'] = VERB_LIST[quarter *", "+ \"Form\" form = getattr(verb_forms, form_name, None) if form is None: raise Http404", "import VERB_LIST class VerbBaseView(object): def get_verb_form(self, 
slug=None): if slug is None: slug =", "forms as verb_forms from verbs.utils import VERB_LIST class VerbBaseView(object): def get_verb_form(self, slug=None): if", "class VerbListView(TemplateView): template_name = \"verbs/verb_list.html\" def get_context_data(self, **kwargs): context = super(VerbListView, self).get_context_data(**kwargs) quarter", "= self.kwargs.get('slug') # convert the slug into a form name form_name = \"\".join([x.title()", "= \"\".join([x.title() for x in slug.split('-')]) + \"Form\" form = getattr(verb_forms, form_name, None)", "raise Http404 return form class VerbDetailView(VerbBaseView, TemplateView): template_name = \"verbs/verb_detail.html\" def get_context_data(self, **kwargs):", "<reponame>Bionetbook/bionetbook<gh_stars>0 from django.http import Http404 from django.views.generic import TemplateView from verbs import forms", "get_context_data(self, **kwargs): context = super(VerbDetailView, self).get_context_data(**kwargs) context[\"verb\"] = self.get_verb_form() return context class VerbListView(TemplateView):", "the slug into a form name form_name = \"\".join([x.title() for x in slug.split('-')])", "self.kwargs.get('slug') # convert the slug into a form name form_name = \"\".join([x.title() for", "return context class VerbListView(TemplateView): template_name = \"verbs/verb_list.html\" def get_context_data(self, **kwargs): context = super(VerbListView,", "= VERB_LIST[:quarter] context['verb_list2'] = VERB_LIST[quarter:quarter * 2] context['verb_list3'] = VERB_LIST[quarter * 2:quarter *", "\"\".join([x.title() for x in slug.split('-')]) + \"Form\" form = getattr(verb_forms, form_name, None) if", "context[\"verb\"] = self.get_verb_form() return context class VerbListView(TemplateView): template_name = \"verbs/verb_list.html\" def get_context_data(self, **kwargs):", "django.http import Http404 from django.views.generic import TemplateView from verbs import forms as verb_forms", "2] context['verb_list3'] = VERB_LIST[quarter * 2:quarter * 3] 
context['verb_list4'] = VERB_LIST[quarter * 3:]", "form_name = \"\".join([x.title() for x in slug.split('-')]) + \"Form\" form = getattr(verb_forms, form_name,", "slug.split('-')]) + \"Form\" form = getattr(verb_forms, form_name, None) if form is None: raise", "verb_forms from verbs.utils import VERB_LIST class VerbBaseView(object): def get_verb_form(self, slug=None): if slug is", "= VERB_LIST[quarter:quarter * 2] context['verb_list3'] = VERB_LIST[quarter * 2:quarter * 3] context['verb_list4'] =", "**kwargs): context = super(VerbDetailView, self).get_context_data(**kwargs) context[\"verb\"] = self.get_verb_form() return context class VerbListView(TemplateView): template_name", "None) if form is None: raise Http404 return form class VerbDetailView(VerbBaseView, TemplateView): template_name", "self.get_verb_form() return context class VerbListView(TemplateView): template_name = \"verbs/verb_list.html\" def get_context_data(self, **kwargs): context =", "self).get_context_data(**kwargs) quarter = len(VERB_LIST) / 4 context['verb_list1'] = VERB_LIST[:quarter] context['verb_list2'] = VERB_LIST[quarter:quarter *", "from django.http import Http404 from django.views.generic import TemplateView from verbs import forms as", "form = getattr(verb_forms, form_name, None) if form is None: raise Http404 return form", "\"Form\" form = getattr(verb_forms, form_name, None) if form is None: raise Http404 return", "context['verb_list1'] = VERB_LIST[:quarter] context['verb_list2'] = VERB_LIST[quarter:quarter * 2] context['verb_list3'] = VERB_LIST[quarter * 2:quarter", "VerbListView(TemplateView): template_name = \"verbs/verb_list.html\" def get_context_data(self, **kwargs): context = super(VerbListView, self).get_context_data(**kwargs) quarter =", "TemplateView): template_name = \"verbs/verb_detail.html\" def get_context_data(self, **kwargs): context = super(VerbDetailView, self).get_context_data(**kwargs) context[\"verb\"] =", "template_name = \"verbs/verb_detail.html\" def 
get_context_data(self, **kwargs): context = super(VerbDetailView, self).get_context_data(**kwargs) context[\"verb\"] = self.get_verb_form()", "= \"verbs/verb_detail.html\" def get_context_data(self, **kwargs): context = super(VerbDetailView, self).get_context_data(**kwargs) context[\"verb\"] = self.get_verb_form() return", "super(VerbListView, self).get_context_data(**kwargs) quarter = len(VERB_LIST) / 4 context['verb_list1'] = VERB_LIST[:quarter] context['verb_list2'] = VERB_LIST[quarter:quarter", "verbs import forms as verb_forms from verbs.utils import VERB_LIST class VerbBaseView(object): def get_verb_form(self,", "= super(VerbListView, self).get_context_data(**kwargs) quarter = len(VERB_LIST) / 4 context['verb_list1'] = VERB_LIST[:quarter] context['verb_list2'] =", "VERB_LIST[:quarter] context['verb_list2'] = VERB_LIST[quarter:quarter * 2] context['verb_list3'] = VERB_LIST[quarter * 2:quarter * 3]", "super(VerbDetailView, self).get_context_data(**kwargs) context[\"verb\"] = self.get_verb_form() return context class VerbListView(TemplateView): template_name = \"verbs/verb_list.html\" def", "get_verb_form(self, slug=None): if slug is None: slug = self.kwargs.get('slug') # convert the slug", "= self.get_verb_form() return context class VerbListView(TemplateView): template_name = \"verbs/verb_list.html\" def get_context_data(self, **kwargs): context", "\"verbs/verb_list.html\" def get_context_data(self, **kwargs): context = super(VerbListView, self).get_context_data(**kwargs) quarter = len(VERB_LIST) / 4", "context = super(VerbListView, self).get_context_data(**kwargs) quarter = len(VERB_LIST) / 4 context['verb_list1'] = VERB_LIST[:quarter] context['verb_list2']", "as verb_forms from verbs.utils import VERB_LIST class VerbBaseView(object): def get_verb_form(self, slug=None): if slug", "context['verb_list3'] = VERB_LIST[quarter * 2:quarter * 3] context['verb_list4'] = VERB_LIST[quarter * 3:] return", "slug = self.kwargs.get('slug') # convert the slug into a form 
name form_name =", "len(VERB_LIST) / 4 context['verb_list1'] = VERB_LIST[:quarter] context['verb_list2'] = VERB_LIST[quarter:quarter * 2] context['verb_list3'] =", "def get_context_data(self, **kwargs): context = super(VerbDetailView, self).get_context_data(**kwargs) context[\"verb\"] = self.get_verb_form() return context class", "TemplateView from verbs import forms as verb_forms from verbs.utils import VERB_LIST class VerbBaseView(object):", "slug=None): if slug is None: slug = self.kwargs.get('slug') # convert the slug into", "VERB_LIST[quarter:quarter * 2] context['verb_list3'] = VERB_LIST[quarter * 2:quarter * 3] context['verb_list4'] = VERB_LIST[quarter", "import TemplateView from verbs import forms as verb_forms from verbs.utils import VERB_LIST class", "a form name form_name = \"\".join([x.title() for x in slug.split('-')]) + \"Form\" form", "None: raise Http404 return form class VerbDetailView(VerbBaseView, TemplateView): template_name = \"verbs/verb_detail.html\" def get_context_data(self,", "from verbs import forms as verb_forms from verbs.utils import VERB_LIST class VerbBaseView(object): def", "from verbs.utils import VERB_LIST class VerbBaseView(object): def get_verb_form(self, slug=None): if slug is None:", "/ 4 context['verb_list1'] = VERB_LIST[:quarter] context['verb_list2'] = VERB_LIST[quarter:quarter * 2] context['verb_list3'] = VERB_LIST[quarter", "form_name, None) if form is None: raise Http404 return form class VerbDetailView(VerbBaseView, TemplateView):", "form is None: raise Http404 return form class VerbDetailView(VerbBaseView, TemplateView): template_name = \"verbs/verb_detail.html\"", "quarter = len(VERB_LIST) / 4 context['verb_list1'] = VERB_LIST[:quarter] context['verb_list2'] = VERB_LIST[quarter:quarter * 2]", "x in slug.split('-')]) + \"Form\" form = getattr(verb_forms, form_name, None) if form is", "import forms as verb_forms from verbs.utils import VERB_LIST class VerbBaseView(object): def get_verb_form(self, slug=None):", "= 
super(VerbDetailView, self).get_context_data(**kwargs) context[\"verb\"] = self.get_verb_form() return context class VerbListView(TemplateView): template_name = \"verbs/verb_list.html\"", "in slug.split('-')]) + \"Form\" form = getattr(verb_forms, form_name, None) if form is None:", "django.views.generic import TemplateView from verbs import forms as verb_forms from verbs.utils import VERB_LIST", "for x in slug.split('-')]) + \"Form\" form = getattr(verb_forms, form_name, None) if form", "class VerbDetailView(VerbBaseView, TemplateView): template_name = \"verbs/verb_detail.html\" def get_context_data(self, **kwargs): context = super(VerbDetailView, self).get_context_data(**kwargs)", "class VerbBaseView(object): def get_verb_form(self, slug=None): if slug is None: slug = self.kwargs.get('slug') #", "into a form name form_name = \"\".join([x.title() for x in slug.split('-')]) + \"Form\"", "def get_context_data(self, **kwargs): context = super(VerbListView, self).get_context_data(**kwargs) quarter = len(VERB_LIST) / 4 context['verb_list1']", "import Http404 from django.views.generic import TemplateView from verbs import forms as verb_forms from", "def get_verb_form(self, slug=None): if slug is None: slug = self.kwargs.get('slug') # convert the", "= \"verbs/verb_list.html\" def get_context_data(self, **kwargs): context = super(VerbListView, self).get_context_data(**kwargs) quarter = len(VERB_LIST) /", "VerbDetailView(VerbBaseView, TemplateView): template_name = \"verbs/verb_detail.html\" def get_context_data(self, **kwargs): context = super(VerbDetailView, self).get_context_data(**kwargs) context[\"verb\"]", "is None: slug = self.kwargs.get('slug') # convert the slug into a form name", "**kwargs): context = super(VerbListView, self).get_context_data(**kwargs) quarter = len(VERB_LIST) / 4 context['verb_list1'] = VERB_LIST[:quarter]", "Http404 from django.views.generic import TemplateView from verbs import forms as verb_forms from verbs.utils" ]
[ "Valid values: <Value> Bound <Value> Array \"<TimeArraySpec>\" Enter the time step <Value> to", "return int(self.raw[3:5].decode()) @property def version(self): '''str : The version in major.minor format.''' return", "responses with each other if commands are being processed asynchronously. ''' return int(self.raw[24:30])", "For information about \"<IntervalOrListSpec>\" see Component Specification. See STK Help for more details", "def header_length(self): '''int : The header_length, should always be 42.''' return int(self.raw[3:5].decode()) @property", "socket.timeout: logging.debug('Timeout reached, returning buffer') self.socket.settimeout(None) return buffer def disconnect(self): '''Alias of .close()'''", "period). Valid values: UseAccessTimes {TimeInterval} Intervals {\"<FilePath>\" | \"<IntervalOrListSpec>\"} Enter {TimeInterval} to define", "The port on which the desired instance is accepting connections. address : tuple", "to the STK Connect socket specified. Args: None Returns: None Raises: STKConnectError :", "a moment to start self._connect() if type(self) == AsyncConnect: self.send(f'ConControl / AsyncOn') else:", "'''Read all available data from the TCP/IP socket. Args: timeout : int or", "True: self.send(f'ConControl / AckOff') def _connect(self): attempt = 0 while True: attempt +=", "Bound <Value> to have the report steps calculated on a specific time boundary.", "get_ack(self, message): '''Block until an ACK is received from STK Connect. Users should", "bytestring = bytestring.decode() self.raw = bytestring def __repr__(self): return f'<{self.raw}>' @property def sync(self):", "or pre-data, such as a comparison object for the RIC report for a", "STK Help. Summary : str or None (default: None) Summary data is not", "range(1,hdr.total_packets): hdr, data = self.get_message() logging.debug(f'GotMessage: {hdr}{data}') msg_grp[hdr.packet_number-1] = data if msg_grp[-1] ==", "to be used in creating the report. 
This value is entered in seconds", "Intervals option to specify an STK interval file for the time period or", "int(self.raw[5].decode()) @property def minor_version(self): '''int : The minor version number.''' return int(self.raw[6].decode()) @property", "socket on {self.host}:{self.port}') time.sleep( 3 ) def send(self, message, attempts=None): '''Sends a Connect", "= self.get_single_message() if hdr.async_type == 'ACK': return True elif hdr.async_type == 'NACK': raise", "the current packet for this identifier.''' return int(self.raw[34:38]) @property def data_length(self): '''int :", "def report(self, **kwargs): '''Create a report in STK and save it to a", "recommended to leave this to True. connect_attempts : int (default: 5) The maximum", "message += f' TimePeriod {TimePeriod}' if TimeStep is not None: message += f'", "buffer = b'' while True: try: buffer += self.socket.recv(4096) except socket.timeout: logging.debug('Timeout reached,", "also specify at least one AccessObject. Or use the Intervals option to specify", "(host, port) ''' return (self.host, self.port) def connect(self): '''Connect to the STK Connect", "the file to which the report should be written. TimePeriod : str or", ": The sequence number of the current packet for this identifier.''' return int(self.raw[34:38])", "as time steps. For information about \"<TimeArraySpec>\" see Component Specification. AdditionalData : Some", "AdditionalData=None, Summary=None, AllLines=None): message = f'ReportCreate */{ObjPath} Style \"{Style}\" Type \"Export\" File \"{FilePath}\"'", "all available data from the TCP/IP socket. Args: timeout : int or None", "STK hasn't finished initializing by the time this is called. 
send_attempts : int", "None ''' try: self.socket.close() except: pass def __repr__(self): return f'{type(self).__name__}({self.host}:{self.port})' def __del__(self): self.close()", "the instance of STK hasn't finished initializing by the time this is called.", "should not typically need to use this method directly, as it is called", "The timestep to use for the report. If None, then use the default", "] class AsyncHeader(): '''A helper class to read the STK Connect Asynchronous Message", "Satellite/A_Satellite_Name Style : str or path-like object (required) The Style name, if it", "= socket.socket(socket.AF_INET, socket.SOCK_STREAM) time.sleep(3) # give STK a moment to start self._connect() if", "time period to use for the report. If None, then use the default", "the start time and stop time for the report span. For valid {TimeInterval}", "timeout is None: timeout = self.timeout self.socket.setblocking(False) self.socket.settimeout(timeout) logging.debug('Reading until no data is", "message = f'ReportCreate */{ObjPath} Style \"{Style}\"' if AccessObjectPath is not None: message +=", "have the summary data included in the exported report file. Specify the Include", "message): msg = self.socket.recv(3).decode() if msg == 'ACK': # logging.debug('ACK Received') return elif", "hdr, data = self.get_message() logging.debug(f'GotMessage: {hdr}{data}') msg_grp[hdr.packet_number-1] = data if msg_grp[-1] == '':", "'''The socket address tuple. Args: None Returns: tuple : (host, port) ''' return", "AllLines {AllLines}' self.send(message) buffer = self.read(**kwargs).decode() if len(buffer) == 0: return [] return", "= self.read(**kwargs).decode() if len(buffer) == 0: return [] return [ x[18:] for x", "'''int : The length of the data field for the current packet.''' return", "message = f'Report_RM */{ObjPath} Style \"{Style}\"' if AccessObjectPath is not None: message +=", "STK and save it to a file. 
Args: ObjPath : str (required) The", "values: UseAccessTimes {TimeInterval} Intervals {\"<FilePath>\" | \"<IntervalOrListSpec>\"} Enter {TimeInterval} to define the start", "if len(sm) > 0: messages.append(sm) return messages @inherit_docstrings def report(self, ObjPath, Style, FilePath,", "seconds. If 0 is entered then the default time step (usually 60 seconds)", "self.get_multi_message() return [x[1] for x in messages] # report = '' # for", "<Value> to have the report steps calculated on a specific time boundary. This", "elif msg == 'NAC': k = self.socket.recv(1).decode() msg = msg + k raise", "self.port) def connect(self): '''Connect to the STK Connect socket specified. Args: None Returns:", "be written. TimePeriod : str or None (default: None) The time period to", "host : str The host on which the desired instance of STK is", "used to associate the correct responses with each other if commands are being", "Type \"Export\" File \"{FilePath}\"' if AccessObjectPath is not None: message += f' AccessObject", "def data_length(self): '''int : The length of the data field for the current", "in range(int(data)): sm = self.get_single_message() if len(sm) > 0: messages.append(sm) return messages @inherit_docstrings", "more details on these options. TimeStep : float or str The timestep to", "4 20:13:37 2020 @author: jolsten \"\"\" import sys, logging import socket import time", "containing the data received from the socket ''' timeout = timeout if timeout", "/ AsyncOn') else: self.send(f'ConControl / AsyncOff') if self.ack is not True: self.send(f'ConControl /", "self.send_attempts attempt = 0 while True: attempt += 1 try: self._send(message) if self.ack:", "self.read(**kwargs).decode() # if len(buffer) == 0: return [] # logging.debug(f'Report_RM Returned: {buffer}') #", "a path to the desired .RST file. 
TimePeriod : str or None (default:", "kwargs self.host = str( kwargs.get('host', 'localhost') ) self.port = int( kwargs.get('port', 5001) )", "attempts at connecting to the socket. send_attempts : int Sets the default maximum", "caught if attempt >= self.connect_attempts: raise STKConnectError(f'Failed to connect to STK via socket", "'': del msg_grp[-1] return msg_grp @inherit_docstrings def report(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None,", "Include Only Summary data is not generally included. Use this option, to have", "Help for more details on these options. TimeStep : float or str (default:", "= header.rstrip().split() length = int(length) data = self.socket.recv(length).decode() return header, data def get_multi_message(self):", "the summary data reported. Returns: None ''' pass @abstractmethod def report_rm(self, **kwargs): '''Create", "the report; use the Only value to have only the summary data reported.", "times as time steps. For information about \"<TimeArraySpec>\" see Component Specification. AdditionalData :", "None: message += f' AllLines {AllLines}' self.send(message) buffer = self.read(**kwargs).decode() if len(buffer) ==", "The Style name, if it is already loaded into STK (or is a", "hdr, data def get_multi_message(self): logging.debug('Getting Message Block:') hdr, data = self.get_single_message() logging.debug(f'GotMessage: {hdr}{data}')", "1 ) ) self.socket = None @property def address(self): '''The socket address tuple.", "a NACK is received. 
Returns: None Raises: STKNackError : If too many NACK", "except: pass def __repr__(self): return f'{type(self).__name__}({self.host}:{self.port})' def __del__(self): self.close() @abstractmethod def get_ack(self, message):", ": int (default: 5) The maximum number of attempts at connecting to the", "- len(data) ).decode() return hdr, data def get_multi_message(self): logging.debug('Getting Message Block:') hdr, data", "msg = self.socket.recv(3).decode() if msg == 'ACK': # logging.debug('ACK Received') return elif msg", "network traffic is unpredictable, increasing the timeout will increase the likelihood that you", "Connect socket specified. Args: None Returns: None Raises: STKConnectError : If, after .connect_attempts", "None: message += f' AccessObject {AccessObjectPath}' if TimePeriod is not None: message +=", "to have the report steps calculated on a specific time boundary. This value", "instance is using ACK/NACK. Changing this after .connect() is called will not change", ".read() before assuming all data was received. ''' def __init__(self, **kwargs): '''Inits an", "rest of the report; use the Only value to have only the summary", "TimePeriod {TimePeriod}' if TimeStep is not None: message += f' TimeStep {TimeStep}' if", ": bool A boolean representing whether the instance is using ACK/NACK. Changing this", ": str or None (default: None) Some Report Styles require additional or pre-data,", "file. FilePath : str or path-like object (required) The path to the file", "the read() function returns. 
Returns: bytes : a bytes object containing the data", "= self.socket.recv(42).decode() hdr = AsyncHeader(msg) pdl = hdr.data_length data = self.socket.recv( pdl ).decode()", "values: Include Only Specify the Include value to have the summary included with", "Returns: None ''' try: self.socket.close() except: pass def __repr__(self): return f'{type(self).__name__}({self.host}:{self.port})' def __del__(self):", "= float( kwargs.get('timeout', 1 ) ) self.socket = None @property def address(self): '''The", "specified. Args: None Returns: None Raises: STKConnectError : If, after .connect_attempts attempts, a", "== 0: return [] return [ x[18:] for x in buffer.split('AGI421009REPORT_RM ')[1:] ]", "+= # return .join() # buffer = self.read(**kwargs).decode() # if len(buffer) == 0:", "tuple. Args: None Returns: tuple : (host, port) ''' return (self.host, self.port) def", "The host on which the desired instance of STK is running. port :", "specification. For help on creating the STK interval file, see Create & Import", "is not None: message += f' AccessObject {AccessObjectPath}' if TimePeriod is not None:", "TCP/IP socket. Args: timeout : int or None (default: None) Sets the timeout", "used. Or enter the Array keyword with a Time Array component specification to", "get_multi_message(self): logging.debug('Getting Message Block:') hdr, data = self.get_single_message() logging.debug(f'GotMessage: {hdr}{data}') msg_grp = [None]", "ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None): message = f'ReportCreate */{ObjPath} Style", "exceptions caught logging.info(f'Connected to STK on {self.host}:{self.port}') return True finally: # continue loop", "received from STK. 
Examples: s.send(\"Unload / *\") ''' if attempts is None: attempts", "header_length, should always be 42.''' return int(self.raw[3:5].decode()) @property def version(self): '''str : The", "def _connect(self): attempt = 0 while True: attempt += 1 try: self.socket.connect(self.address) except", "only the summary data reported. Returns: None ''' pass class Connect(_AbstractConnect): @inherit_docstrings def", "@property def total_packets(self): '''int : The total number of packets in the current", "return int(self.raw[5].decode()) @property def minor_version(self): '''int : The minor version number.''' return int(self.raw[6].decode())", "before raising STKNackError. timeout : float Sets the default timeout period for calls", "'''Connect to the STK Connect socket specified. Args: None Returns: None Raises: STKConnectError", "kwargs.get('port', 5001) ) self.ack = bool( kwargs.get('ack', True) ) self.connect_attempts = int( kwargs.get('connect_attempts',", "sys, logging import socket import time from abc import ABCMeta, abstractmethod from .exceptions", "AsyncConnect: self.send(f'ConControl / AsyncOn') else: self.send(f'ConControl / AsyncOff') if self.ack is not True:", "the socket. Several attempts should be made, in case the instance of STK", "only the summary data reported. Returns: None ''' pass @abstractmethod def report_rm(self, **kwargs):", "report style). Otherwise, pass a path to the desired .RST file. FilePath :", "Args: message: A string containing the STK Connect command attempts: Optional; The maximum", "FilePath : str or path-like object (required) The path to the file to", ".RST file. FilePath : str or path-like object (required) The path to the", "'ACK': # logging.debug('ACK Received') return elif msg == 'NAC': k = self.socket.recv(1).decode() msg", ") self.timeout = float( kwargs.get('timeout', 1 ) ) self.socket = None @property def", "comparison object for the RIC report for a Satellite. 
For these types of", "'''Inits an STK connection object (Connect or AsyncConnect) Args: host : str (default:", "one AccessObject. Or use the Intervals option to specify an STK interval file", "= kwargs self.host = str( kwargs.get('host', 'localhost') ) self.port = int( kwargs.get('port', 5001)", "Style, FilePath, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None): message = f'ReportCreate */{ObjPath} Style", "[] return [ x[18:] for x in buffer.split('AGI421009REPORT_RM ')[1:] ] class AsyncHeader(): '''A", "int The port on which the desired instance is accepting connections. address :", "string.''' return (self.raw[9:24])[0:self.type_length] @property def identifier(self): '''int : The value of the response", "get_multi_message(self): pass @abstractmethod def report(self, **kwargs): '''Create a report in STK and save", "of times to send the command if a NACK is received. Returns: None", "in major.minor format.''' return f'{self.major_version}.{self.minor_version}' @property def major_version(self): '''int : The major version", "name, if it is already loaded into STK (or is a default report", "''' return int(self.raw[24:30]) @property def total_packets(self): '''int : The total number of packets", "from the TCP/IP socket. Args: timeout : int or None (default: None) Sets", "in STK and save it to a file. Args: ObjPath : str (required)", "None: message += f' AdditionalData \"{AdditionalData}\"' if Summary is not None: message +=", "an STK connection object (Connect or AsyncConnect) Args: host : str (default: 'localhost')", "number of packets in the current identifier.''' return int(self.raw[30:34]) @property def packet_number(self): '''int", "instance of STK hasn't finished initializing by the time this is called. 
send_attempts", "loop if no exceptions caught logging.info(f'Connected to STK on {self.host}:{self.port}') return True finally:", "class to read the STK Connect Asynchronous Message Format headers.''' def __init__(self, bytestring):", ": str The host on which the desired instance of STK is running.", "(typically the parent object's timestep). Valid values: <Value> Bound <Value> Array \"<TimeArraySpec>\" Enter", "data was received. Because network traffic is unpredictable, increasing the timeout will increase", "pass @abstractmethod def get_multi_message(self): pass @abstractmethod def report(self, **kwargs): '''Create a report in", "bool( kwargs.get('ack', True) ) self.connect_attempts = int( kwargs.get('connect_attempts', 5) ) self.send_attempts = int(", "also adds a mandatory minimum delay before the read() function returns. ''' self._kwargs", "def disconnect(self): '''Alias of .close()''' self.close() def close(self): '''Closes the STK Connect socket.", "Otherwise, pass a path to the desired .RST file. TimePeriod : str or", "(default: 1) Sets the default maximum number of attempts to make while calling", "specification to use the array times as time steps. For information about \"<TimeArraySpec>\"", "time step (usually 60 seconds) is used. Or enter the Array keyword with", "number of attempts at connecting to the socket. send_attempts : int Sets the", "if msg == 'ACK': # logging.debug('ACK Received') return elif msg == 'NAC': k", "using ACK/NACK. 
Changing this after .connect() is called will not change the mode.", "5) ) self.send_attempts = int( kwargs.get('send_attempts', 1) ) self.timeout = float( kwargs.get('timeout', 1", ": The header_length, should always be 42.''' return int(self.raw[3:5].decode()) @property def version(self): '''str", "str or None (default: None) Some Report Styles require additional or pre-data, such", "the socket...') buffer = b'' while True: try: buffer += self.socket.recv(4096) except socket.timeout:", "Import External Files - Interval List in STK Help. For information about \"<IntervalOrListSpec>\"", "\"{Style}\" Type \"Export\" File \"{FilePath}\"' if AccessObjectPath is not None: message += f'", "{msg}{self.socket.recv(2048)}') sys.exit(1) def get_single_message(self): header = self.socket.recv(40).decode() cmd_name, length = header.rstrip().split() length =", "increasing the timeout will increase the likelihood that you receive all the data.", "report should be written. TimePeriod : str or None (default: None) The time", "def identifier(self): '''int : The value of the response ID. This should be", "included. Use this option, to have the summary data included in the exported", "Interval or Interval List component specification. For help on creating the STK interval", "maximum number of times to send the command if a NACK is received.", "ObjPath, Style, FilePath, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None): message = f'ReportCreate */{ObjPath}", "@property def version(self): '''str : The version in major.minor format.''' return f'{self.major_version}.{self.minor_version}' @property", "interval file, see Create & Import External Files - Interval List in STK", "packet for this identifier.''' return int(self.raw[34:38]) @property def data_length(self): '''int : The length", "data included in the exported report file. 
Specify the Include value to have", "hdr.data_length: data += self.socket.recv( pdl - len(data) ).decode() return hdr, data def get_multi_message(self):", "using the raw values, passed as bytes or str.''' if isinstance(bytestring, bytes): bytestring", "''' pass @abstractmethod def get_single_message(self): pass @abstractmethod def get_multi_message(self): pass @abstractmethod def report(self,", "the default (typically the parent object's timestep). Valid values: <Value> Bound <Value> Array", "for calls to .read() before assuming all data was received. Because network traffic", "= f'ReportCreate */{ObjPath} Style \"{Style}\" Type \"Export\" File \"{FilePath}\"' if AccessObjectPath is not", "or path-like object (required) The path to the file to which the report", "messages @inherit_docstrings def report(self, ObjPath, Style, FilePath, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None):", "close(self): '''Closes the STK Connect socket. Args: None Returns: None ''' try: self.socket.close()", "f' Summary {Summary}' if AllLines is not None: message += f' AllLines {AllLines}'", "True elif hdr.async_type == 'NACK': raise STKNackError(f'NACK Received: stk.send(\"{message}\")') def get_single_message(self): msg =", "The sequence number of the current packet for this identifier.''' return int(self.raw[34:38]) @property", "of STK hasn't finished initializing by the time this is called. send_attempts :", "all the data. However, this also adds a mandatory minimum delay before the", "= b'' while True: try: buffer += self.socket.recv(4096) except socket.timeout: logging.debug('Timeout reached, returning", "address : tuple The address as a tuple (host, port) ack : bool", "to send the command if a NACK is received. Returns: None Raises: STKNackError", "None: message += f' TimePeriod {TimePeriod}' if TimeStep is not None: message +=", "ObjPath : str (required) The STK Object Path for the desired report. 
e.g.", "Message Format headers.''' def __init__(self, bytestring): '''Inits a new object using the raw", "''' pass class Connect(_AbstractConnect): @inherit_docstrings def get_ack(self, message): msg = self.socket.recv(3).decode() if msg", "socket. Args: message: A string containing the STK Connect command attempts: Optional; The", "def version(self): '''str : The version in major.minor format.''' return f'{self.major_version}.{self.minor_version}' @property def", "float (default: 1.0) Sets the default timeout period for calls to .read() before", "get_ack(self, message): msg = self.socket.recv(3).decode() if msg == 'ACK': # logging.debug('ACK Received') return", "word, should always be \"AGI\"''' return self.raw[0:3].decode() @property def header_length(self): '''int : The", "be between 0.000001 and 1000000000.0 seconds. Or enter Bound <Value> to have the", "many times') raise STKNackError(e) def _send(self, message: str): logging.debug(f'stk.send(\"{message}\")') self.socket.send( (message+'\\n').encode() ) def", "bool A boolean representing whether the instance is using ACK/NACK. Changing this after", "attempt >= attempts: logging.error(f'send() failed, received NACK too many times') raise STKNackError(e) def", "if AccessObjectPath is not None: message += f' AccessObject {AccessObjectPath}' if TimePeriod is", "message += f' AllLines {AllLines}' self.send(message) buffer = self.read(**kwargs).decode() if len(buffer) == 0:", "to associate the correct responses with each other if commands are being processed", "_AbstractConnect(metaclass=ABCMeta): '''An STK Connect connection class. 
Attributes: host : str The host on", "report_rm(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None, **kwargs): message = f'Report_RM", "return True elif hdr.async_type == 'NACK': raise STKNackError(f'NACK Received: stk.send(\"{message}\")') def get_single_message(self): msg", "headers.''' def __init__(self, bytestring): '''Inits a new object using the raw values, passed", ": The value of the command type string.''' return (self.raw[9:24])[0:self.type_length] @property def identifier(self):", "current identifier.''' return int(self.raw[30:34]) @property def packet_number(self): '''int : The sequence number of", "The minor version number.''' return int(self.raw[6].decode()) @property def type_length(self): '''int : The length", "A string containing the STK Connect command attempts: Optional; The maximum number of", "to use this method directly, as it is called from .send() if the", "class Connect(_AbstractConnect): @inherit_docstrings def get_ack(self, message): msg = self.socket.recv(3).decode() if msg == 'ACK':", "timestep). Valid values: <Value> Bound <Value> Array \"<TimeArraySpec>\" Enter the time step <Value>", "# report += # return .join() # buffer = self.read(**kwargs).decode() # if len(buffer)", "the class attribute ack=True Args: None Returns: None ''' pass @abstractmethod def get_single_message(self):", "e: if attempt >= attempts: logging.error(f'send() failed, received NACK too many times') raise", "major.minor format.''' return f'{self.major_version}.{self.minor_version}' @property def major_version(self): '''int : The major version number.'''", "The value of the response ID. This should be used to associate the", "seconds and must be between 0.000001 and 1000000000.0 seconds. Or enter Bound <Value>", "values, passed as bytes or str.''' if isinstance(bytestring, bytes): bytestring = bytestring.decode() self.raw", "str (required) The STK Object Path for the desired report. e.g. 
Facility/A_Facility_Name Satellite/A_Satellite_Name", "start time and stop time for the report span. For valid {TimeInterval} values", "else: self.send(f'ConControl / AsyncOff') if self.ack is not True: self.send(f'ConControl / AckOff') def", "port) ack : bool A boolean representing whether the instance is using ACK/NACK.", "not None: message += f' AdditionalData \"{AdditionalData}\"' if Summary is not None: message", "information about \"<TimeArraySpec>\" see Component Specification. AdditionalData : str or None (default: None)", "maximum number of attempts to make while calling .send() before raising STKNackError. timeout", "# -*- coding: utf-8 -*- \"\"\" Created on Tue Aug 4 20:13:37 2020", "to the desired .RST file. TimePeriod : str or None (default: None) The", "then use the default (typically the parent object's timestep). Valid values: <Value> Bound", "return elif msg == 'NAC': k = self.socket.recv(1).decode() msg = msg + k", "major version number.''' return int(self.raw[5].decode()) @property def minor_version(self): '''int : The minor version", "\"AGI\"''' return self.raw[0:3].decode() @property def header_length(self): '''int : The header_length, should always be", "sys.exit(1) def get_single_message(self): header = self.socket.recv(40).decode() cmd_name, length = header.rstrip().split() length = int(length)", "version number.''' return int(self.raw[5].decode()) @property def minor_version(self): '''int : The minor version number.'''", "is accepting connections. address : tuple The address as a tuple (host, port)", "attempts, a connection couldn't be made successfully.' ''' self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) time.sleep(3)", "a default report style). 
Otherwise, pass a path to the desired .RST file.", "msg_grp[hdr.packet_number-1] = data if msg_grp[-1] == '': del msg_grp[-1] return msg_grp @inherit_docstrings def", "msg + k raise STKNackError(f'NACK Received: stk.send(\"{message.rstrip()}\")') else: logging.error(f'Expecting ACK or NACK, got:", "Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None): message = f'ReportCreate */{ObjPath} Style \"{Style}\"'", "the desired .RST file. FilePath : str or path-like object (required) The path", "no data is left in the socket...') buffer = b'' while True: try:", "found at \"Report Additional Data\" in the STK Help. Summary : str or", "a file. Args: ObjPath : str (required) The STK Object Path for the", "str or path-like object (required) The Style name, if it is already loaded", "to define the start time and stop time for the report span. For", "data reported. Returns: None ''' pass class Connect(_AbstractConnect): @inherit_docstrings def get_ack(self, message): msg", "@property def data_length(self): '''int : The length of the data field for the", "return [x[1] for x in messages] # report = '' # for msg", "Report Styles require additional or pre-data, such as a comparison object for the", "not True: self.send(f'ConControl / AckOff') def _connect(self): attempt = 0 while True: attempt", "sync word, should always be \"AGI\"''' return self.raw[0:3].decode() @property def header_length(self): '''int :", "in the STK Help. 
Summary : str or None (default: None) Summary data", "TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None, **kwargs): message = f'Report_RM */{ObjPath} Style \"{Style}\"'", "logging.error(f'Expecting ACK or NACK, got: {msg}{self.socket.recv(2048)}') sys.exit(1) def get_single_message(self): header = self.socket.recv(40).decode() cmd_name,", "1 try: self.socket.connect(self.address) except ConnectionRefusedError as e: logging.debug(f'ConnectionRefusedError: {e}') else: # exit loop", "0 and 3600 seconds. If 0 is entered then the default time step", "<ObjectPath> and an AccessObject, but you must also specify at least one AccessObject.", "If, after .connect_attempts attempts, a connection couldn't be made successfully.' ''' self.socket =", "the default maximum number of attempts to make while calling .send() before raising", "Args: None Returns: None ''' pass @abstractmethod def get_single_message(self): pass @abstractmethod def get_multi_message(self):", "loaded into STK (or is a default report style). Otherwise, pass a path", "import socket import time from abc import ABCMeta, abstractmethod from .exceptions import *", "of the report; use the Only value to have only the summary data", "any exception caught if attempt >= self.connect_attempts: raise STKConnectError(f'Failed to connect to STK", "msg == 'ACK': # logging.debug('ACK Received') return elif msg == 'NAC': k =", "of attempts to make while calling .send() before raising STKNackError. timeout : int", "in seconds and must be between 0 and 3600 seconds. If 0 is", "self.socket = None @property def address(self): '''The socket address tuple. Args: None Returns:", "to a file. Args: ObjPath : str (required) The STK Object Path for", "a report in STK and return them via socket. 
Args: ObjPath : str", "None Returns: None ''' pass @abstractmethod def get_single_message(self): pass @abstractmethod def get_multi_message(self): pass", "int( kwargs.get('connect_attempts', 5) ) self.send_attempts = int( kwargs.get('send_attempts', 1) ) self.timeout = float(", "Several attempts should be made, in case the instance of STK hasn't finished", "the likelihood that you receive all the data. However, this also adds a", "string containing the STK Connect command attempts: Optional; The maximum number of times", "AllLines {AllLines}' self.send(message) @inherit_docstrings def report_rm(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None,", "the report. If None, then use the default (typically the parent object's time", "AdditionalData is not None: message += f' AdditionalData \"{AdditionalData}\"' if Summary is not", ": If too many NACK responses were received from STK. Examples: s.send(\"Unload /", "desired .RST file. TimePeriod : str or None (default: None) The time period", "exception caught if attempt >= self.connect_attempts: raise STKConnectError(f'Failed to connect to STK via", "mode. connect_attempts : int The maximum number of attempts at connecting to the", "desired report. e.g. Facility/A_Facility_Name Satellite/A_Satellite_Name Style : str or path-like object (required) The", "Style name, if it is already loaded into STK (or is a default", "path-like object (required) The Style name, if it is already loaded into STK", "def report_rm(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None, **kwargs): message =", "use the Only value to have only the summary data reported. Returns: None", "= [None] * hdr.total_packets msg_grp[hdr.packet_number-1] = data for i in range(1,hdr.total_packets): hdr, data", "time step <Value> to be used in creating the report. 
This value is", "''' timeout = timeout if timeout is None: timeout = self.timeout self.socket.setblocking(False) self.socket.settimeout(timeout)", "this option. More information on styles that require AdditionalData can be found at", "However, this also adds a mandatory minimum delay before the read() function returns.", "the time period or an Interval or Interval List component specification. For help", "return buffer def disconnect(self): '''Alias of .close()''' self.close() def close(self): '''Closes the STK", "step (usually 60 seconds) is used. Or enter the Array keyword with a", "commands are being processed asynchronously. ''' return int(self.raw[24:30]) @property def total_packets(self): '''int :", "def major_version(self): '''int : The major version number.''' return int(self.raw[5].decode()) @property def minor_version(self):", "The sync word, should always be \"AGI\"''' return self.raw[0:3].decode() @property def header_length(self): '''int", "use for the report. If None, then use the default (typically the parent", "'NAC': k = self.socket.recv(1).decode() msg = msg + k raise STKNackError(f'NACK Received: stk.send(\"{message.rstrip()}\")')", "ACK or NACK, got: {msg}{self.socket.recv(2048)}') sys.exit(1) def get_single_message(self): header = self.socket.recv(40).decode() cmd_name, length", "Connect Asynchronous Message Format headers.''' def __init__(self, bytestring): '''Inits a new object using", "or NACK, got: {msg}{self.socket.recv(2048)}') sys.exit(1) def get_single_message(self): header = self.socket.recv(40).decode() cmd_name, length =", ": The value of the response ID. This should be used to associate", "\"<TimeArraySpec>\" Enter the time step <Value> to be used in creating the report.", "'''Block until an ACK is received from STK Connect. Users should not typically", "report steps calculated on a specific time boundary. 
This value is entered in", "AdditionalData=None, Summary=None, AllLines=None): message = f'ReportCreate */{ObjPath} Style \"{Style}\"' if AccessObjectPath is not", "parent object's time period). Valid values: UseAccessTimes {TimeInterval} Intervals {\"<FilePath>\" | \"<IntervalOrListSpec>\"} Enter", "value to have only the summary data reported. Returns: None ''' pass class", "STK (or is a default report style). Otherwise, pass a path to the", "'''Sends a Connect command via socket. Args: message: A string containing the STK", "while calling .send() before raising STKNackError. timeout : int or float (default: 1.0)", "the report. If None, then use the default (typically the parent object's timestep).", "until an ACK is received from STK Connect. Users should not typically need", "for the report. If None, then use the default (typically the parent object's", "continue loop if any exception caught if attempt >= self.connect_attempts: raise STKConnectError(f'Failed to", "utf-8 -*- \"\"\" Created on Tue Aug 4 20:13:37 2020 @author: jolsten \"\"\"", "directly, as it is called from .send() if the class attribute ack=True Args:", "a bytes object containing the data received from the socket ''' timeout =", "STK Connect. Highly recommended to leave this to True. 
connect_attempts : int (default:", "try: self.socket.close() except: pass def __repr__(self): return f'{type(self).__name__}({self.host}:{self.port})' def __del__(self): self.close() @abstractmethod def", "report data during access times between the <ObjectPath> and an AccessObject, but you", "= self.send_attempts attempt = 0 while True: attempt += 1 try: self._send(message) if", "return f'<{self.raw}>' @property def sync(self): '''str : The sync word, should always be", "self.socket.recv(4096) except socket.timeout: logging.debug('Timeout reached, returning buffer') self.socket.settimeout(None) return buffer def disconnect(self): '''Alias", "# report = '' # for msg in messages: # report += #", "AccessObject {AccessObjectPath}' if TimePeriod is not None: message += f' TimePeriod {TimePeriod}' if", "self.connect_attempts: raise STKConnectError(f'Failed to connect to STK via socket on {self.host}:{self.port}') time.sleep( 3", "time this is called. send_attempts : int (default: 1) Sets the default maximum", "Include value to have the summary included with the rest of the report;", "number.''' return int(self.raw[6].decode()) @property def type_length(self): '''int : The length of the command", "def report(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None): message = f'ReportCreate", "def total_packets(self): '''int : The total number of packets in the current identifier.'''", "of the current packet for this identifier.''' return int(self.raw[34:38]) @property def data_length(self): '''int", "int Sets the default maximum number of attempts to make while calling .send()", "reported. Returns: None ''' pass class Connect(_AbstractConnect): @inherit_docstrings def get_ack(self, message): msg =", "else: # exit loop if no exceptions caught logging.info(f'Connected to STK on {self.host}:{self.port}')", "Connect command via socket. 
Args: message: A string containing the STK Connect command", "period for calls to .read() before assuming all data was received. Because network", "This value is entered in seconds and must be between 0.000001 and 1000000000.0", "the STK Help. Summary : str or None (default: None) Summary data is", "being processed asynchronously. ''' return int(self.raw[24:30]) @property def total_packets(self): '''int : The total", "logging.debug('Getting Message Block:') hdr, data = self.get_single_message() logging.debug(f'GotMessage: {hdr}{data}') msg_grp = [None] *", "from STK. Examples: s.send(\"Unload / *\") ''' if attempts is None: attempts =", "'''int : The length of the command type string.''' return int(self.raw[7:9]) @property def", "which the desired instance of STK is running. port : int The port", "def get_multi_message(self): pass @abstractmethod def report(self, **kwargs): '''Create a report in STK and", "- Interval List in STK Help. For information about \"<IntervalOrListSpec>\" see Component Specification.", "from .exceptions import * from .utils import STK_DATEFMT, inherit_docstrings class _AbstractConnect(metaclass=ABCMeta): '''An STK", "already loaded into STK (or is a default report style). Otherwise, pass a", "'''Inits a new object using the raw values, passed as bytes or str.'''", ".send() before raising STKNackError. timeout : float Sets the default timeout period for", "raw values, passed as bytes or str.''' if isinstance(bytestring, bytes): bytestring = bytestring.decode()", "the RIC report for a Satellite. For these types of reports you must", "delay before the read() function returns. 
''' self._kwargs = kwargs self.host = str(", "header.rstrip().split() length = int(length) data = self.socket.recv(length).decode() return header, data def get_multi_message(self): hdr,", "data = self.get_message() logging.debug(f'GotMessage: {hdr}{data}') msg_grp[hdr.packet_number-1] = data if msg_grp[-1] == '': del", "If None, then use the default (typically the parent object's timestep). Valid values:", "int(length) data = self.socket.recv(length).decode() return header, data def get_multi_message(self): hdr, data = self.get_single_message()", "in the exported report file. Valid values: Include Only Specify the Include value", "for this specific call to .read() before assuming all data was received. Because", "or float (default: 1.0) Sets the default timeout period for calls to .read()", "Summary is not None: message += f' Summary {Summary}' if AllLines is not", "the report. This value is entered in seconds and must be between 0.000001", "return [ x[18:] for x in buffer.split('AGI421009REPORT_RM ')[1:] ] class AsyncHeader(): '''A helper", "1) ) self.timeout = float( kwargs.get('timeout', 1 ) ) self.socket = None @property", "to use for the report. If None, then use the default (typically the", "with a Time Array component specification to use the array times as time", "raise STKNackError(f'NACK Received: stk.send(\"{message.rstrip()}\")') else: logging.error(f'Expecting ACK or NACK, got: {msg}{self.socket.recv(2048)}') sys.exit(1) def", "self.send(f'ConControl / AckOff') def _connect(self): attempt = 0 while True: attempt += 1", "returns. ''' self._kwargs = kwargs self.host = str( kwargs.get('host', 'localhost') ) self.port =", "not None: message += f' TimeStep {TimeStep}' if AdditionalData is not None: message", "'''A helper class to read the STK Connect Asynchronous Message Format headers.''' def", "received. ''' def __init__(self, **kwargs): '''Inits an STK connection object (Connect or AsyncConnect)", "RIC report for a Satellite. 
For these types of reports you must include", "not None: message += f' AccessObject {AccessObjectPath}' if TimePeriod is not None: message", "is called will not change the mode. connect_attempts : int The maximum number", "and 3600 seconds. If 0 is entered then the default time step (usually", "from STK Connect. Users should not typically need to use this method directly,", "instance of STK is running. port : int The port on which the", "unpredictable, increasing the timeout will increase the likelihood that you receive all the", "file. Valid values: Include Only Specify the Include value to have the summary", "= bool( kwargs.get('ack', True) ) self.connect_attempts = int( kwargs.get('connect_attempts', 5) ) self.send_attempts =", "hdr.data_length data = self.socket.recv( pdl ).decode() while len(data) < hdr.data_length: data += self.socket.recv(", "(default: None) Summary data is not generally included. Use this option, to have", "1.0) Sets the default timeout period for calls to .read() before assuming all", "None) The timestep to use for the report. If None, then use the", "Help. For information about \"<IntervalOrListSpec>\" see Component Specification. See STK Help for more", "'''int : The minor version number.''' return int(self.raw[6].decode()) @property def type_length(self): '''int :", "while len(data) < hdr.data_length: data += self.socket.recv( pdl - len(data) ).decode() return hdr,", "self.socket.recv( pdl - len(data) ).decode() return hdr, data def get_multi_message(self): logging.debug('Getting Message Block:')", "ACK/NACK responses with STK Connect. Highly recommended to leave this to True. connect_attempts", "True finally: # continue loop if any exception caught if attempt >= self.connect_attempts:", "attempts: logging.error(f'send() failed, received NACK too many times') raise STKNackError(e) def _send(self, message:", "float or str (default: None) The timestep to use for the report. 
If", ") ) self.socket = None @property def address(self): '''The socket address tuple. Args:", "For these types of reports you must include this option. More information on", "This value is entered in seconds and must be between 0 and 3600", "read() function returns. Returns: bytes : a bytes object containing the data received", "is already loaded into STK (or is a default report style). Otherwise, pass", "[x[1] for x in messages] # report = '' # for msg in", "None: message += f' Summary {Summary}' if AllLines is not None: message +=", "message += f' AdditionalData \"{AdditionalData}\"' if Summary is not None: message += f'", "[ x[18:] for x in buffer.split('AGI421009REPORT_RM ')[1:] ] class AsyncHeader(): '''A helper class", "sync(self): '''str : The sync word, should always be \"AGI\"''' return self.raw[0:3].decode() @property", "received NACK too many times') raise STKNackError(e) def _send(self, message: str): logging.debug(f'stk.send(\"{message}\")') self.socket.send(", "__init__(self, bytestring): '''Inits a new object using the raw values, passed as bytes", "socket. send_attempts : int Sets the default maximum number of attempts to make", "STK interval file for the time period or an Interval or Interval List", "== 'ACK': return True elif hdr.async_type == 'NACK': raise STKNackError(f'NACK Received: stk.send(\"{message}\")') def", "raising STKNackError. timeout : float Sets the default timeout period for calls to", "are being processed asynchronously. ''' return int(self.raw[24:30]) @property def total_packets(self): '''int : The", "for x in messages] # report = '' # for msg in messages:", "Args: host : str (default: 'localhost') port : int (default: 5001) ack :", "to have only the summary data reported. Returns: None ''' pass @abstractmethod def", "other if commands are being processed asynchronously. 
''' return int(self.raw[24:30]) @property def total_packets(self):", "if hdr.async_type == 'ACK': return True elif hdr.async_type == 'NACK': raise STKNackError(f'NACK Received:", ": The length of the data field for the current packet.''' return int(self.raw[38:42])", "return msg_grp @inherit_docstrings def report(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None):", "command attempts: Optional; The maximum number of times to send the command if", "def __repr__(self): return f'{type(self).__name__}({self.host}:{self.port})' def __del__(self): self.close() @abstractmethod def get_ack(self, message): '''Block until", "at least one AccessObject. Or use the Intervals option to specify an STK", "TimePeriod : str or None (default: None) The time period to use for", "STK Help for more details on these options. TimeStep : float or str", "{TimeInterval} values see Time Options. Or specify UseAccessTimes to only report data during", "**kwargs): '''Create a report in STK and save it to a file. Args:", "in the socket...') buffer = b'' while True: try: buffer += self.socket.recv(4096) except", "in creating the report. This value is entered in seconds and must be", "socket. Args: None Returns: None ''' try: self.socket.close() except: pass def __repr__(self): return", "bytestring.decode() self.raw = bytestring def __repr__(self): return f'<{self.raw}>' @property def sync(self): '''str :", "socket. Args: timeout : int or None (default: None) Sets the timeout period", "on these options. TimeStep : float or str (default: None) The timestep to", "if TimePeriod is not None: message += f' TimePeriod {TimePeriod}' if TimeStep is", "message: A string containing the STK Connect command attempts: Optional; The maximum number", "Connect. Highly recommended to leave this to True. 
connect_attempts : int (default: 5)", "kwargs.get('timeout', 1 ) ) self.socket = None @property def address(self): '''The socket address", "bytes or str.''' if isinstance(bytestring, bytes): bytestring = bytestring.decode() self.raw = bytestring def", "more details on these options. TimeStep : float or str (default: None) The", "connect_attempts : int (default: 5) The maximum number of attempts at connecting to", "TimeStep : float or str (default: None) The timestep to use for the", "report = '' # for msg in messages: # report += # return", "attempts: Optional; The maximum number of times to send the command if a", "Or enter the Array keyword with a Time Array component specification to use", "total number of packets in the current identifier.''' return int(self.raw[30:34]) @property def packet_number(self):", "'localhost') ) self.port = int( kwargs.get('port', 5001) ) self.ack = bool( kwargs.get('ack', True)", "Enter the time step <Value> to be used in creating the report. This", "logging.debug(f'GotMessage: {hdr}{data}') msg_grp[hdr.packet_number-1] = data if msg_grp[-1] == '': del msg_grp[-1] return msg_grp", "return int(self.raw[7:9]) @property def async_type(self): '''str : The value of the command type", "to have the summary included with the rest of the report; use the", "period to use for the report. If None, then use the default (typically", "data reported. Returns: None ''' pass @abstractmethod def report_rm(self, **kwargs): '''Create a report", "None ''' pass @abstractmethod def report_rm(self, **kwargs): '''Create a report in STK and", "creating the STK interval file, see Create & Import External Files - Interval", "object for the RIC report for a Satellite. For these types of reports", "called. 
send_attempts : int (default: 1) Sets the default maximum number of attempts", "return int(self.raw[30:34]) @property def packet_number(self): '''int : The sequence number of the current", "cmd_name, length = header.rstrip().split() length = int(length) data = self.socket.recv(length).decode() return header, data", "in STK and return them via socket. Args: ObjPath : str (required) The", "port on which the desired instance is accepting connections. address : tuple The", "(default: 5) The maximum number of attempts at connecting to the socket. Several", "0: return [] return [ x[18:] for x in buffer.split('AGI421009REPORT_RM ')[1:] ] class", "int or None (default: None) Sets the timeout period for this specific call", "Create & Import External Files - Interval List in STK Help. For information", "message, attempts=None): '''Sends a Connect command via socket. Args: message: A string containing", "the Only value to have only the summary data reported. Returns: None '''", "messages = [] for i in range(int(data)): sm = self.get_single_message() if len(sm) >", "calling .send() before raising STKNackError. timeout : float Sets the default timeout period", "Data\" in the STK Help. Summary : str or None (default: None) Summary", "object's time period). Valid values: UseAccessTimes {TimeInterval} Intervals {\"<FilePath>\" | \"<IntervalOrListSpec>\"} Enter {TimeInterval}", "The major version number.''' return int(self.raw[5].decode()) @property def minor_version(self): '''int : The minor", "messages.append(sm) return messages @inherit_docstrings def report(self, ObjPath, Style, FilePath, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None,", "and an AccessObject, but you must also specify at least one AccessObject. Or", "<Value> to be used in creating the report. This value is entered in", "number of attempts to make while calling .send() before raising STKNackError. 
timeout :", "except STKNackError as e: if attempt >= attempts: logging.error(f'send() failed, received NACK too", "_connect(self): attempt = 0 while True: attempt += 1 try: self.socket.connect(self.address) except ConnectionRefusedError", "= timeout if timeout is None: timeout = self.timeout self.socket.setblocking(False) self.socket.settimeout(timeout) logging.debug('Reading until", "for more details on these options. TimeStep : float or str (default: None)", "report for a Satellite. For these types of reports you must include this", "on styles that require AdditionalData can be found at \"Report Additional Data\" in", "host on which the desired instance of STK is running. port : int", "must be between 0 and 3600 seconds. If 0 is entered then the", "this identifier.''' return int(self.raw[34:38]) @property def data_length(self): '''int : The length of the", "Asynchronous Message Format headers.''' def __init__(self, bytestring): '''Inits a new object using the", "then use the default (typically the parent object's time period). Valid values: UseAccessTimes", "value is entered in seconds and must be between 0.000001 and 1000000000.0 seconds.", "version(self): '''str : The version in major.minor format.''' return f'{self.major_version}.{self.minor_version}' @property def major_version(self):", "the Include value to have the summary included with the rest of the", "with the rest of the report; use the Only value to have only", "and return them via socket. Args: ObjPath : str (required) The STK Object", "Only Summary data is not generally included. Use this option, to have the", "= int(length) data = self.socket.recv(length).decode() return header, data def get_multi_message(self): hdr, data =", "whether or not to use ACK/NACK responses with STK Connect. 
Highly recommended to", "type(self) == AsyncConnect: self.send(f'ConControl / AsyncOn') else: self.send(f'ConControl / AsyncOff') if self.ack is", "the correct responses with each other if commands are being processed asynchronously. '''", "get_ack(self, message): hdr, data = self.get_single_message() if hdr.async_type == 'ACK': return True elif", "port : int (default: 5001) ack : bool (default: True) Specifies whether or", "file. TimePeriod : str or None (default: None) The time period to use", "def address(self): '''The socket address tuple. Args: None Returns: tuple : (host, port)", "attribute ack=True Args: None Returns: None ''' pass @abstractmethod def get_single_message(self): pass @abstractmethod", "0 is entered then the default time step (usually 60 seconds) is used.", "= data if msg_grp[-1] == '': del msg_grp[-1] return msg_grp @inherit_docstrings def report(self,", "data was received. ''' def __init__(self, **kwargs): '''Inits an STK connection object (Connect", "return messages @inherit_docstrings def report(self, ObjPath, Style, FilePath, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None,", "report; use the Only value to have only the summary data reported. Returns:", "or not to use ACK/NACK responses with STK Connect. Highly recommended to leave", "at \"Report Additional Data\" in the STK Help. Summary : str or None", "got: {msg}{self.socket.recv(2048)}') sys.exit(1) def get_single_message(self): header = self.socket.recv(40).decode() cmd_name, length = header.rstrip().split() length", "0: messages.append(sm) return messages @inherit_docstrings def report(self, ObjPath, Style, FilePath, TimePeriod=None, TimeStep=None, AccessObjectPath=None,", ": float Sets the default timeout period for calls to .read() before assuming", "default report style). Otherwise, pass a path to the desired .RST file. 
FilePath", "connect to STK via socket on {self.host}:{self.port}') time.sleep( 3 ) def send(self, message,", "stop time for the report span. For valid {TimeInterval} values see Time Options.", "self.socket.recv(length).decode() return header, data def get_multi_message(self): hdr, data = self.get_single_message() messages = []", ").decode() while len(data) < hdr.data_length: data += self.socket.recv( pdl - len(data) ).decode() return", "send(self, message, attempts=None): '''Sends a Connect command via socket. Args: message: A string", "= 0 while True: attempt += 1 try: self.socket.connect(self.address) except ConnectionRefusedError as e:", "assuming all data was received. Because network traffic is unpredictable, increasing the timeout", "Help. Summary : str or None (default: None) Summary data is not generally", "The address as a tuple (host, port) ack : bool A boolean representing", "have the report steps calculated on a specific time boundary. This value is", "caught logging.info(f'Connected to STK on {self.host}:{self.port}') return True finally: # continue loop if", "+= f' TimeStep {TimeStep}' if AdditionalData is not None: message += f' AdditionalData", "Valid values: Include Only Summary data is not generally included. Use this option,", "Specification. AdditionalData : Some Report Styles require additional or pre-data, such as a", "UseAccessTimes to only report data during access times between the <ObjectPath> and an", "@property def minor_version(self): '''int : The minor version number.''' return int(self.raw[6].decode()) @property def", "with each other if commands are being processed asynchronously. 
''' return int(self.raw[24:30]) @property", "str.''' if isinstance(bytestring, bytes): bytestring = bytestring.decode() self.raw = bytestring def __repr__(self): return", ".join() # buffer = self.read(**kwargs).decode() # if len(buffer) == 0: return [] #", "self.send(message) buffer = self.read(**kwargs).decode() if len(buffer) == 0: return [] return [ x[18:]", "None) Sets the timeout period for this specific call to .read() before assuming", "if len(buffer) == 0: return [] return [ x[18:] for x in buffer.split('AGI421009REPORT_RM", "identifier(self): '''int : The value of the response ID. This should be used", "parent object's timestep). Valid values: <Value> Bound <Value> Array \"<TimeArraySpec>\" Enter the time", "for more details on these options. TimeStep : float or str The timestep", "The path to the file to which the report should be written. TimePeriod", "a report in STK and save it to a file. Args: ObjPath :", "an ACK is received from STK Connect. Users should not typically need to", "function returns. Returns: bytes : a bytes object containing the data received from", "class AsyncHeader(): '''A helper class to read the STK Connect Asynchronous Message Format", "report. e.g. Facility/A_Facility_Name Satellite/A_Satellite_Name Style : str or path-like object (required) The Style", "def get_single_message(self): pass @abstractmethod def get_multi_message(self): pass @abstractmethod def report(self, **kwargs): '''Create a", "too many times') raise STKNackError(e) def _send(self, message: str): logging.debug(f'stk.send(\"{message}\")') self.socket.send( (message+'\\n').encode() )", "== 0: return [] # logging.debug(f'Report_RM Returned: {buffer}') # return [] class AsyncConnect(_AbstractConnect):", "to the desired .RST file. FilePath : str or path-like object (required) The", "raise STKConnectError(f'Failed to connect to STK via socket on {self.host}:{self.port}') time.sleep( 3 )", "period for calls to .read() before assuming all data was received. 
''' def", "see Component Specification. AdditionalData : Some Report Styles require additional or pre-data, such", "object (Connect or AsyncConnect) Args: host : str (default: 'localhost') port : int", "Returns: None Raises: STKConnectError : If, after .connect_attempts attempts, a connection couldn't be", "str (default: None) The timestep to use for the report. If None, then", "STK a moment to start self._connect() if type(self) == AsyncConnect: self.send(f'ConControl / AsyncOn')", "this also adds a mandatory minimum delay before the read() function returns. '''", "and 1000000000.0 seconds. Or enter Bound <Value> to have the report steps calculated", "the array times as time steps. For information about \"<TimeArraySpec>\" see Component Specification.", "attempts to make while calling .send() before raising STKNackError. timeout : float Sets", "for a Satellite. For these types of reports you must include this option.", "msg = self.socket.recv(42).decode() hdr = AsyncHeader(msg) pdl = hdr.data_length data = self.socket.recv( pdl", "in the exported report file. 
Specify the Include value to have the summary", "len(data) ).decode() return hdr, data def get_multi_message(self): logging.debug('Getting Message Block:') hdr, data =", ") self.connect_attempts = int( kwargs.get('connect_attempts', 5) ) self.send_attempts = int( kwargs.get('send_attempts', 1) )", "{TimeInterval} Intervals {\"<FilePath>\" | \"<IntervalOrListSpec>\"} Enter {TimeInterval} to define the start time and", "pass def __repr__(self): return f'{type(self).__name__}({self.host}:{self.port})' def __del__(self): self.close() @abstractmethod def get_ack(self, message): '''Block", "enter Bound <Value> to have the report steps calculated on a specific time", "File \"{FilePath}\"' if AccessObjectPath is not None: message += f' AccessObject {AccessObjectPath}' if", ": str or path-like object (required) The Style name, if it is already", "the STK Connect Asynchronous Message Format headers.''' def __init__(self, bytestring): '''Inits a new", "as e: if attempt >= attempts: logging.error(f'send() failed, received NACK too many times')", "version in major.minor format.''' return f'{self.major_version}.{self.minor_version}' @property def major_version(self): '''int : The major", "Connect(_AbstractConnect): @inherit_docstrings def get_ack(self, message): msg = self.socket.recv(3).decode() if msg == 'ACK': #", "> 0: messages.append(sm) return messages @inherit_docstrings def report(self, ObjPath, Style, FilePath, TimePeriod=None, TimeStep=None,", "def sync(self): '''str : The sync word, should always be \"AGI\"''' return self.raw[0:3].decode()", "in the current identifier.''' return int(self.raw[30:34]) @property def packet_number(self): '''int : The sequence", "Specify the Include value to have the summary included with the rest of", "attempt += 1 try: self.socket.connect(self.address) except ConnectionRefusedError as e: logging.debug(f'ConnectionRefusedError: {e}') else: #", "+= f' AllLines {AllLines}' self.send(message) @inherit_docstrings def report_rm(self, ObjPath, 
Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None,", "None Raises: STKConnectError : If, after .connect_attempts attempts, a connection couldn't be made", "= self.socket.recv(40).decode() cmd_name, length = header.rstrip().split() length = int(length) data = self.socket.recv(length).decode() return", "Highly recommended to leave this to True. connect_attempts : int (default: 5) The", "TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None): message = f'ReportCreate */{ObjPath} Style \"{Style}\" Type \"Export\"", "This should be used to associate the correct responses with each other if", "whether the instance is using ACK/NACK. Changing this after .connect() is called will", "define the start time and stop time for the report span. For valid", "return True finally: # continue loop if any exception caught if attempt >=", "you must also specify at least one AccessObject. Or use the Intervals option", "the response ID. This should be used to associate the correct responses with", "or an Interval or Interval List component specification. For help on creating the", "host : str (default: 'localhost') port : int (default: 5001) ack : bool", "default report style). Otherwise, pass a path to the desired .RST file. TimePeriod", "as bytes or str.''' if isinstance(bytestring, bytes): bytestring = bytestring.decode() self.raw = bytestring", "AdditionalData can be found at \"Report Additional Data\" in the STK Help. Summary", "the STK Connect command attempts: Optional; The maximum number of times to send", "on {self.host}:{self.port}') return True finally: # continue loop if any exception caught if", "increase the likelihood that you receive all the data. 
However, this also adds", "logging.debug(f'ConnectionRefusedError: {e}') else: # exit loop if no exceptions caught logging.info(f'Connected to STK", "if attempt >= attempts: logging.error(f'send() failed, received NACK too many times') raise STKNackError(e)", "(Connect or AsyncConnect) Args: host : str (default: 'localhost') port : int (default:", "an AccessObject, but you must also specify at least one AccessObject. Or use", "return hdr, data def get_multi_message(self): logging.debug('Getting Message Block:') hdr, data = self.get_single_message() logging.debug(f'GotMessage:", "finally: # continue loop if any exception caught if attempt >= self.connect_attempts: raise", "it is already loaded into STK (or is a default report style). Otherwise,", "calls to .read() before assuming all data was received. Because network traffic is", "if msg_grp[-1] == '': del msg_grp[-1] return msg_grp @inherit_docstrings def report(self, ObjPath, Style,", "or str (default: None) The timestep to use for the report. If None,", "to make while calling .send() before raising STKNackError. timeout : int or float", "attempts should be made, in case the instance of STK hasn't finished initializing", "len(sm) > 0: messages.append(sm) return messages @inherit_docstrings def report(self, ObjPath, Style, FilePath, TimePeriod=None,", "format.''' return f'{self.major_version}.{self.minor_version}' @property def major_version(self): '''int : The major version number.''' return", "True: attempt += 1 try: self._send(message) if self.ack: self.get_ack(message) return except STKNackError as", "'''Alias of .close()''' self.close() def close(self): '''Closes the STK Connect socket. 
Args: None", "(default: 1.0) Sets the default timeout period for calls to .read() before assuming", "-*- coding: utf-8 -*- \"\"\" Created on Tue Aug 4 20:13:37 2020 @author:", "for x in buffer.split('AGI421009REPORT_RM ')[1:] ] class AsyncHeader(): '''A helper class to read", "and must be between 0.000001 and 1000000000.0 seconds. Or enter Bound <Value> to", "couldn't be made successfully.' ''' self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) time.sleep(3) # give STK", "Connect socket. Args: None Returns: None ''' try: self.socket.close() except: pass def __repr__(self):", "AckOff') def _connect(self): attempt = 0 while True: attempt += 1 try: self.socket.connect(self.address)", "i in range(int(data)): sm = self.get_single_message() if len(sm) > 0: messages.append(sm) return messages", "**kwargs): message = f'Report_RM */{ObjPath} Style \"{Style}\"' if AccessObjectPath is not None: message", "f' AdditionalData \"{AdditionalData}\"' if Summary is not None: message += f' Summary {Summary}'", "details on these options. TimeStep : float or str The timestep to use", "of .close()''' self.close() def close(self): '''Closes the STK Connect socket. 
Args: None Returns:", "self.close() @abstractmethod def get_ack(self, message): '''Block until an ACK is received from STK", "kwargs.get('connect_attempts', 5) ) self.send_attempts = int( kwargs.get('send_attempts', 1) ) self.timeout = float( kwargs.get('timeout',", "length = header.rstrip().split() length = int(length) data = self.socket.recv(length).decode() return header, data def", "AsyncHeader(): '''A helper class to read the STK Connect Asynchronous Message Format headers.'''", "+= f' AllLines {AllLines}' self.send(message) buffer = self.read(**kwargs).decode() if len(buffer) == 0: return", "/ AckOff') def _connect(self): attempt = 0 while True: attempt += 1 try:", "= bytestring def __repr__(self): return f'<{self.raw}>' @property def sync(self): '''str : The sync", "message += f' AllLines {AllLines}' self.send(message) @inherit_docstrings def report_rm(self, ObjPath, Style, TimePeriod=None, TimeStep=None,", "if isinstance(bytestring, bytes): bytestring = bytestring.decode() self.raw = bytestring def __repr__(self): return f'<{self.raw}>'", "method directly, as it is called from .send() if the class attribute ack=True", "path to the file to which the report should be written. TimePeriod :", "received from STK Connect. 
Users should not typically need to use this method", "k = self.socket.recv(1).decode() msg = msg + k raise STKNackError(f'NACK Received: stk.send(\"{message.rstrip()}\")') else:", "connection object (Connect or AsyncConnect) Args: host : str (default: 'localhost') port :", "data = self.get_single_message() if hdr.async_type == 'ACK': return True elif hdr.async_type == 'NACK':", "AllLines is not None: message += f' AllLines {AllLines}' self.send(message) messages = self.get_multi_message()", "should always be 42.''' return int(self.raw[3:5].decode()) @property def version(self): '''str : The version", "import ABCMeta, abstractmethod from .exceptions import * from .utils import STK_DATEFMT, inherit_docstrings class", "major_version(self): '''int : The major version number.''' return int(self.raw[5].decode()) @property def minor_version(self): '''int", "STK. Examples: s.send(\"Unload / *\") ''' if attempts is None: attempts = self.send_attempts", "self.socket.recv(42).decode() hdr = AsyncHeader(msg) pdl = hdr.data_length data = self.socket.recv( pdl ).decode() while", "Or enter Bound <Value> to have the report steps calculated on a specific", "the mode. connect_attempts : int The maximum number of attempts at connecting to", "[] for i in range(int(data)): sm = self.get_single_message() if len(sm) > 0: messages.append(sm)", "options. TimeStep : float or str The timestep to use for the report.", "the STK Connect socket specified. Args: None Returns: None Raises: STKConnectError : If,", "identifier.''' return int(self.raw[34:38]) @property def data_length(self): '''int : The length of the data", "style). Otherwise, pass a path to the desired .RST file. TimePeriod : str", "before assuming all data was received. 
''' def __init__(self, **kwargs): '''Inits an STK", "= self.read(**kwargs).decode() # if len(buffer) == 0: return [] # logging.debug(f'Report_RM Returned: {buffer}')", "that require AdditionalData can be found at \"Report Additional Data\" in the STK", "should always be \"AGI\"''' return self.raw[0:3].decode() @property def header_length(self): '''int : The header_length,", "from abc import ABCMeta, abstractmethod from .exceptions import * from .utils import STK_DATEFMT,", "20:13:37 2020 @author: jolsten \"\"\" import sys, logging import socket import time from", "logging.error(f'send() failed, received NACK too many times') raise STKNackError(e) def _send(self, message: str):", "file, see Create & Import External Files - Interval List in STK Help.", "to STK on {self.host}:{self.port}') return True finally: # continue loop if any exception", "tuple (host, port) ack : bool A boolean representing whether the instance is", "The header_length, should always be 42.''' return int(self.raw[3:5].decode()) @property def version(self): '''str :", "data_length(self): '''int : The length of the data field for the current packet.'''", "(typically the parent object's time period). Valid values: UseAccessTimes {TimeInterval} Intervals {\"<FilePath>\" |", "* hdr.total_packets msg_grp[hdr.packet_number-1] = data for i in range(1,hdr.total_packets): hdr, data = self.get_message()", "def report(self, ObjPath, Style, FilePath, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None): message =", "this also adds a mandatory minimum delay before the read() function returns. Returns:", "Facility/A_Facility_Name Satellite/A_Satellite_Name Style : str or path-like object (required) The Style name, if", "summary data reported. Returns: None ''' pass @abstractmethod def report_rm(self, **kwargs): '''Create a", "True) Specifies whether or not to use ACK/NACK responses with STK Connect. 
Highly", "to specify an STK interval file for the time period or an Interval", "if AllLines is not None: message += f' AllLines {AllLines}' self.send(message) @inherit_docstrings def", "values: Include Only Summary data is not generally included. Use this option, to", "del msg_grp[-1] return msg_grp @inherit_docstrings def report(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None,", "socket. Several attempts should be made, in case the instance of STK hasn't", "(required) The STK Object Path for the desired report. e.g. Facility/A_Facility_Name Satellite/A_Satellite_Name Style", "def _send(self, message: str): logging.debug(f'stk.send(\"{message}\")') self.socket.send( (message+'\\n').encode() ) def read(self, timeout=None): '''Read all", "access times between the <ObjectPath> and an AccessObject, but you must also specify", "def get_ack(self, message): hdr, data = self.get_single_message() if hdr.async_type == 'ACK': return True", "information about \"<TimeArraySpec>\" see Component Specification. AdditionalData : Some Report Styles require additional", "data = self.get_single_message() messages = [] for i in range(int(data)): sm = self.get_single_message()", "a connection couldn't be made successfully.' ''' self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) time.sleep(3) #", "least one AccessObject. Or use the Intervals option to specify an STK interval", "to .read() before assuming all data was received. ''' def __init__(self, **kwargs): '''Inits", "\"{FilePath}\"' if AccessObjectPath is not None: message += f' AccessObject {AccessObjectPath}' if TimePeriod", "(self.raw[9:24])[0:self.type_length] @property def identifier(self): '''int : The value of the response ID. This", "def get_single_message(self): header = self.socket.recv(40).decode() cmd_name, length = header.rstrip().split() length = int(length) data", "which the report should be written. 
TimePeriod : str or None (default: None)", "command type string.''' return int(self.raw[7:9]) @property def async_type(self): '''str : The value of", "start self._connect() if type(self) == AsyncConnect: self.send(f'ConControl / AsyncOn') else: self.send(f'ConControl / AsyncOff')", "minimum delay before the read() function returns. Returns: bytes : a bytes object", "self.connect_attempts = int( kwargs.get('connect_attempts', 5) ) self.send_attempts = int( kwargs.get('send_attempts', 1) ) self.timeout", "bytestring): '''Inits a new object using the raw values, passed as bytes or", "identifier.''' return int(self.raw[30:34]) @property def packet_number(self): '''int : The sequence number of the", "summary data included in the exported report file. Specify the Include value to", "1000000000.0 seconds. Or enter Bound <Value> to have the report steps calculated on", "style). Otherwise, pass a path to the desired .RST file. FilePath : str", "f' AllLines {AllLines}' self.send(message) messages = self.get_multi_message() return [x[1] for x in messages]", "kwargs.get('ack', True) ) self.connect_attempts = int( kwargs.get('connect_attempts', 5) ) self.send_attempts = int( kwargs.get('send_attempts',", "option. More information on styles that require AdditionalData can be found at \"Report", "see Component Specification. AdditionalData : str or None (default: None) Some Report Styles", "self.get_single_message() logging.debug(f'GotMessage: {hdr}{data}') msg_grp = [None] * hdr.total_packets msg_grp[hdr.packet_number-1] = data for i", "and save it to a file. Args: ObjPath : str (required) The STK", "TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None, **kwargs): message = f'Report_RM */{ObjPath} Style \"{Style}\"' if", "boundary. This value is entered in seconds and must be between 0 and", "time boundary. This value is entered in seconds and must be between 0", "file. 
Args: ObjPath : str (required) The STK Object Path for the desired", "passed as bytes or str.''' if isinstance(bytestring, bytes): bytestring = bytestring.decode() self.raw =", "self._send(message) if self.ack: self.get_ack(message) return except STKNackError as e: if attempt >= attempts:", "as a comparison object for the RIC report for a Satellite. For these", "before assuming all data was received. Because network traffic is unpredictable, increasing the", "AdditionalData : str or None (default: None) Some Report Styles require additional or", "None, then use the default (typically the parent object's time period). Valid values:", "self.timeout = float( kwargs.get('timeout', 1 ) ) self.socket = None @property def address(self):", "data = self.get_single_message() logging.debug(f'GotMessage: {hdr}{data}') msg_grp = [None] * hdr.total_packets msg_grp[hdr.packet_number-1] = data", "@property def header_length(self): '''int : The header_length, should always be 42.''' return int(self.raw[3:5].decode())", "options. TimeStep : float or str (default: None) The timestep to use for", "exit loop if no exceptions caught logging.info(f'Connected to STK on {self.host}:{self.port}') return True", "also adds a mandatory minimum delay before the read() function returns. Returns: bytes", "60 seconds) is used. Or enter the Array keyword with a Time Array", "ACK/NACK. Changing this after .connect() is called will not change the mode. connect_attempts", "None) The time period to use for the report. If None, then use", "socket. Args: ObjPath : str (required) The STK Object Path for the desired", "timeout=None): '''Read all available data from the TCP/IP socket. Args: timeout : int", "try: self.socket.connect(self.address) except ConnectionRefusedError as e: logging.debug(f'ConnectionRefusedError: {e}') else: # exit loop if", "None (default: None) The time period to use for the report. If None,", "STK Connect socket specified. 
Args: None Returns: None Raises: STKConnectError : If, after", "report(self, **kwargs): '''Create a report in STK and save it to a file.", "times between the <ObjectPath> and an AccessObject, but you must also specify at", "or None (default: None) Summary data is not generally included. Use this option,", "is not None: message += f' TimePeriod {TimePeriod}' if TimeStep is not None:", "which the desired instance is accepting connections. address : tuple The address as", "Attributes: host : str The host on which the desired instance of STK", "s.send(\"Unload / *\") ''' if attempts is None: attempts = self.send_attempts attempt =", ": The total number of packets in the current identifier.''' return int(self.raw[30:34]) @property", "data if msg_grp[-1] == '': del msg_grp[-1] return msg_grp @inherit_docstrings def report(self, ObjPath,", "connection couldn't be made successfully.' ''' self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) time.sleep(3) # give", "= int( kwargs.get('connect_attempts', 5) ) self.send_attempts = int( kwargs.get('send_attempts', 1) ) self.timeout =", "self.ack is not True: self.send(f'ConControl / AckOff') def _connect(self): attempt = 0 while", "self.get_single_message() if hdr.async_type == 'ACK': return True elif hdr.async_type == 'NACK': raise STKNackError(f'NACK", "@abstractmethod def get_single_message(self): pass @abstractmethod def get_multi_message(self): pass @abstractmethod def report(self, **kwargs): '''Create", "should be made, in case the instance of STK hasn't finished initializing by", "timeout : int or float (default: 1.0) Sets the default timeout period for", "a comparison object for the RIC report for a Satellite. 
For these types", ": The major version number.''' return int(self.raw[5].decode()) @property def minor_version(self): '''int : The", "Some Report Styles require additional or pre-data, such as a comparison object for", "int( kwargs.get('send_attempts', 1) ) self.timeout = float( kwargs.get('timeout', 1 ) ) self.socket =", "/ *\") ''' if attempts is None: attempts = self.send_attempts attempt = 0", "msg_grp[hdr.packet_number-1] = data for i in range(1,hdr.total_packets): hdr, data = self.get_message() logging.debug(f'GotMessage: {hdr}{data}')", "be 42.''' return int(self.raw[3:5].decode()) @property def version(self): '''str : The version in major.minor", "+= f' Summary {Summary}' if AllLines is not None: message += f' AllLines", "bytes object containing the data received from the socket ''' timeout = timeout", "on a specific time boundary. This value is entered in seconds and must", "**kwargs): '''Create a report in STK and return them via socket. Args: ObjPath", "AdditionalData=None, Summary=None, AllLines=None, **kwargs): message = f'Report_RM */{ObjPath} Style \"{Style}\"' if AccessObjectPath is", "buffer.split('AGI421009REPORT_RM ')[1:] ] class AsyncHeader(): '''A helper class to read the STK Connect", "the summary included with the rest of the report; use the Only value", "int The maximum number of attempts at connecting to the socket. send_attempts :", "x in messages] # report = '' # for msg in messages: #", "Args: None Returns: tuple : (host, port) ''' return (self.host, self.port) def connect(self):", "have only the summary data reported. Returns: None ''' pass @abstractmethod def report_rm(self,", "None ''' pass class Connect(_AbstractConnect): @inherit_docstrings def get_ack(self, message): msg = self.socket.recv(3).decode() if", "Or use the Intervals option to specify an STK interval file for the", "is entered then the default time step (usually 60 seconds) is used. Or", "seconds and must be between 0 and 3600 seconds. 
If 0 is entered", "@inherit_docstrings def report(self, ObjPath, Style, FilePath, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None): message", "message: str): logging.debug(f'stk.send(\"{message}\")') self.socket.send( (message+'\\n').encode() ) def read(self, timeout=None): '''Read all available data", "number of times to send the command if a NACK is received. Returns:", "int (default: 5) The maximum number of attempts at connecting to the socket.", "the command if a NACK is received. Returns: None Raises: STKNackError : If", "data += self.socket.recv( pdl - len(data) ).decode() return hdr, data def get_multi_message(self): logging.debug('Getting", "Summary {Summary}' if AllLines is not None: message += f' AllLines {AllLines}' self.send(message)", "str The host on which the desired instance of STK is running. port", ">= attempts: logging.error(f'send() failed, received NACK too many times') raise STKNackError(e) def _send(self,", "async_type(self): '''str : The value of the command type string.''' return (self.raw[9:24])[0:self.type_length] @property", "the data. However, this also adds a mandatory minimum delay before the read()", "STK on {self.host}:{self.port}') return True finally: # continue loop if any exception caught", "class AsyncConnect(_AbstractConnect): @inherit_docstrings def get_ack(self, message): hdr, data = self.get_single_message() if hdr.async_type ==", "5001) ack : bool (default: True) Specifies whether or not to use ACK/NACK", "of attempts to make while calling .send() before raising STKNackError. timeout : float", "desired .RST file. FilePath : str or path-like object (required) The path to", "return f'{self.major_version}.{self.minor_version}' @property def major_version(self): '''int : The major version number.''' return int(self.raw[5].decode())", "the default (typically the parent object's time period). 
Valid values: UseAccessTimes {TimeInterval} Intervals", "use ACK/NACK responses with STK Connect. Highly recommended to leave this to True.", "is None: attempts = self.send_attempts attempt = 0 while True: attempt += 1", "{TimeInterval} to define the start time and stop time for the report span.", "STKNackError. timeout : int or float (default: 1.0) Sets the default timeout period", "about \"<IntervalOrListSpec>\" see Component Specification. See STK Help for more details on these", "TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None): message = f'ReportCreate */{ObjPath} Style \"{Style}\" Type", "int(self.raw[3:5].decode()) @property def version(self): '''str : The version in major.minor format.''' return f'{self.major_version}.{self.minor_version}'", "traffic is unpredictable, increasing the timeout will increase the likelihood that you receive", "to True. connect_attempts : int (default: 5) The maximum number of attempts at", "for msg in messages: # report += # return .join() # buffer =", "AccessObject, but you must also specify at least one AccessObject. Or use the", "time.sleep( 3 ) def send(self, message, attempts=None): '''Sends a Connect command via socket.", "it to a file. Args: ObjPath : str (required) The STK Object Path", "case the instance of STK hasn't finished initializing by the time this is", "or Interval List component specification. For help on creating the STK interval file,", "type_length(self): '''int : The length of the command type string.''' return int(self.raw[7:9]) @property", ".connect_attempts attempts, a connection couldn't be made successfully.' ''' self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "to only report data during access times between the <ObjectPath> and an AccessObject,", "path to the desired .RST file. FilePath : str or path-like object (required)", "the socket. 
send_attempts : int Sets the default maximum number of attempts to", "were received from STK. Examples: s.send(\"Unload / *\") ''' if attempts is None:", "running. port : int The port on which the desired instance is accepting", "STK Connect connection class. Attributes: host : str The host on which the", "these types of reports you must include this option. More information on styles", ": The sync word, should always be \"AGI\"''' return self.raw[0:3].decode() @property def header_length(self):", "not None: message += f' TimePeriod {TimePeriod}' if TimeStep is not None: message", "Path for the desired report. e.g. Facility/A_Facility_Name Satellite/A_Satellite_Name Style : str or path-like", "UseAccessTimes {TimeInterval} Intervals {\"<FilePath>\" | \"<IntervalOrListSpec>\"} Enter {TimeInterval} to define the start time", "them via socket. Args: ObjPath : str (required) The STK Object Path for", "desired instance of STK is running. port : int The port on which", "not to use ACK/NACK responses with STK Connect. Highly recommended to leave this", "attempt = 0 while True: attempt += 1 try: self._send(message) if self.ack: self.get_ack(message)", ": int The port on which the desired instance is accepting connections. address", "self.socket.connect(self.address) except ConnectionRefusedError as e: logging.debug(f'ConnectionRefusedError: {e}') else: # exit loop if no", "Help for more details on these options. TimeStep : float or str The", "of the response ID. 
This should be used to associate the correct responses", "of packets in the current identifier.''' return int(self.raw[30:34]) @property def packet_number(self): '''int :", "Connect command attempts: Optional; The maximum number of times to send the command", "__init__(self, **kwargs): '''Inits an STK connection object (Connect or AsyncConnect) Args: host :", "buffer = self.read(**kwargs).decode() if len(buffer) == 0: return [] return [ x[18:] for", "or AsyncConnect) Args: host : str (default: 'localhost') port : int (default: 5001)", "length = int(length) data = self.socket.recv(length).decode() return header, data def get_multi_message(self): hdr, data", "between 0 and 3600 seconds. If 0 is entered then the default time", ": If, after .connect_attempts attempts, a connection couldn't be made successfully.' ''' self.socket", "responses were received from STK. Examples: s.send(\"Unload / *\") ''' if attempts is", "be found at \"Report Additional Data\" in the STK Help. Summary : str", "For help on creating the STK interval file, see Create & Import External", "AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None): message = f'ReportCreate */{ObjPath} Style \"{Style}\"' if AccessObjectPath is", "summary data reported. Returns: None ''' pass class Connect(_AbstractConnect): @inherit_docstrings def get_ack(self, message):", "was received. Because network traffic is unpredictable, increasing the timeout will increase the", "the report span. For valid {TimeInterval} values see Time Options. 
Or specify UseAccessTimes", "None Returns: None ''' try: self.socket.close() except: pass def __repr__(self): return f'{type(self).__name__}({self.host}:{self.port})' def", "# logging.debug(f'Report_RM Returned: {buffer}') # return [] class AsyncConnect(_AbstractConnect): @inherit_docstrings def get_ack(self, message):", "minor version number.''' return int(self.raw[6].decode()) @property def type_length(self): '''int : The length of", "or None (default: None) The time period to use for the report. If", "number of attempts at connecting to the socket. Several attempts should be made,", "information on styles that require AdditionalData can be found at \"Report Additional Data\"", "if type(self) == AsyncConnect: self.send(f'ConControl / AsyncOn') else: self.send(f'ConControl / AsyncOff') if self.ack", "Args: timeout : int or None (default: None) Sets the timeout period for", "see Create & Import External Files - Interval List in STK Help. For", "styles that require AdditionalData can be found at \"Report Additional Data\" in the", "get_multi_message(self): hdr, data = self.get_single_message() messages = [] for i in range(int(data)): sm", "List component specification. For help on creating the STK interval file, see Create", "self._connect() if type(self) == AsyncConnect: self.send(f'ConControl / AsyncOn') else: self.send(f'ConControl / AsyncOff') if", "hdr.total_packets msg_grp[hdr.packet_number-1] = data for i in range(1,hdr.total_packets): hdr, data = self.get_message() logging.debug(f'GotMessage:", "times') raise STKNackError(e) def _send(self, message: str): logging.debug(f'stk.send(\"{message}\")') self.socket.send( (message+'\\n').encode() ) def read(self,", "found at \"Report Additional Data\" in the STK Help. Summary : str Valid", "if no exceptions caught logging.info(f'Connected to STK on {self.host}:{self.port}') return True finally: #", "STKNackError. 
timeout : float Sets the default timeout period for calls to .read()", "version number.''' return int(self.raw[6].decode()) @property def type_length(self): '''int : The length of the", "is not None: message += f' Summary {Summary}' if AllLines is not None:", "'''str : The sync word, should always be \"AGI\"''' return self.raw[0:3].decode() @property def", "NACK responses were received from STK. Examples: s.send(\"Unload / *\") ''' if attempts", "| \"<IntervalOrListSpec>\"} Enter {TimeInterval} to define the start time and stop time for", "STKNackError(f'NACK Received: stk.send(\"{message.rstrip()}\")') else: logging.error(f'Expecting ACK or NACK, got: {msg}{self.socket.recv(2048)}') sys.exit(1) def get_single_message(self):", "Valid values: UseAccessTimes {TimeInterval} Intervals {\"<FilePath>\" | \"<IntervalOrListSpec>\"} Enter {TimeInterval} to define the", "None: attempts = self.send_attempts attempt = 0 while True: attempt += 1 try:", "TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None): message = f'ReportCreate */{ObjPath} Style \"{Style}\"' if", "report file. Specify the Include value to have the summary included with the", "(default: None) Some Report Styles require additional or pre-data, such as a comparison", "@abstractmethod def get_ack(self, message): '''Block until an ACK is received from STK Connect.", "(usually 60 seconds) is used. Or enter the Array keyword with a Time", "timeout = self.timeout self.socket.setblocking(False) self.socket.settimeout(timeout) logging.debug('Reading until no data is left in the", "def packet_number(self): '''int : The sequence number of the current packet for this", "Sets the default timeout period for calls to .read() before assuming all data", "default timeout period for calls to .read() before assuming all data was received.", "(message+'\\n').encode() ) def read(self, timeout=None): '''Read all available data from the TCP/IP socket.", "time period). 
Valid values: UseAccessTimes {TimeInterval} Intervals {\"<FilePath>\" | \"<IntervalOrListSpec>\"} Enter {TimeInterval} to", "None: timeout = self.timeout self.socket.setblocking(False) self.socket.settimeout(timeout) logging.debug('Reading until no data is left in", "AsyncOn') else: self.send(f'ConControl / AsyncOff') if self.ack is not True: self.send(f'ConControl / AckOff')", "connect(self): '''Connect to the STK Connect socket specified. Args: None Returns: None Raises:", "connect_attempts : int The maximum number of attempts at connecting to the socket.", "report style). Otherwise, pass a path to the desired .RST file. TimePeriod :", "via socket. Args: message: A string containing the STK Connect command attempts: Optional;", "should be used to associate the correct responses with each other if commands", "of attempts at connecting to the socket. Several attempts should be made, in", "self._kwargs = kwargs self.host = str( kwargs.get('host', 'localhost') ) self.port = int( kwargs.get('port',", "\"Report Additional Data\" in the STK Help. Summary : str Valid values: Include", ": int Sets the default maximum number of attempts to make while calling", ": int (default: 5001) ack : bool (default: True) Specifies whether or not", "processed asynchronously. ''' return int(self.raw[24:30]) @property def total_packets(self): '''int : The total number", "attempts is None: attempts = self.send_attempts attempt = 0 while True: attempt +=", "self.socket.settimeout(None) return buffer def disconnect(self): '''Alias of .close()''' self.close() def close(self): '''Closes the", "Additional Data\" in the STK Help. Summary : str Valid values: Include Only", "{AllLines}' self.send(message) messages = self.get_multi_message() return [x[1] for x in messages] # report", "Summary=None, AllLines=None): message = f'ReportCreate */{ObjPath} Style \"{Style}\"' if AccessObjectPath is not None:", "tuple The address as a tuple (host, port) ack : bool A boolean", "values see Time Options. 
Or specify UseAccessTimes to only report data during access", "return int(self.raw[6].decode()) @property def type_length(self): '''int : The length of the command type", "f'Report_RM */{ObjPath} Style \"{Style}\"' if AccessObjectPath is not None: message += f' AccessObject", "int (default: 1) Sets the default maximum number of attempts to make while", "pass @abstractmethod def report(self, **kwargs): '''Create a report in STK and save it", "the default timeout period for calls to .read() before assuming all data was", "the instance is using ACK/NACK. Changing this after .connect() is called will not", "self.host = str( kwargs.get('host', 'localhost') ) self.port = int( kwargs.get('port', 5001) ) self.ack", "use the default (typically the parent object's timestep). Valid values: <Value> Bound <Value>", "buffer = self.read(**kwargs).decode() # if len(buffer) == 0: return [] # logging.debug(f'Report_RM Returned:", "attempts at connecting to the socket. Several attempts should be made, in case", "port : int The port on which the desired instance is accepting connections.", ": int (default: 1) Sets the default maximum number of attempts to make", "before the read() function returns. ''' self._kwargs = kwargs self.host = str( kwargs.get('host',", "a Satellite. For these types of reports you must include this option. More", "calling .send() before raising STKNackError. timeout : int or float (default: 1.0) Sets", "self.socket.settimeout(timeout) logging.debug('Reading until no data is left in the socket...') buffer = b''", "to use ACK/NACK responses with STK Connect. Highly recommended to leave this to", "attempts=None): '''Sends a Connect command via socket. Args: message: A string containing the", ".read() before assuming all data was received. Because network traffic is unpredictable, increasing", "the <ObjectPath> and an AccessObject, but you must also specify at least one", "List in STK Help. 
For information about \"<IntervalOrListSpec>\" see Component Specification. See STK", "+= self.socket.recv(4096) except socket.timeout: logging.debug('Timeout reached, returning buffer') self.socket.settimeout(None) return buffer def disconnect(self):", "(or is a default report style). Otherwise, pass a path to the desired", "to connect to STK via socket on {self.host}:{self.port}') time.sleep( 3 ) def send(self,", "on {self.host}:{self.port}') time.sleep( 3 ) def send(self, message, attempts=None): '''Sends a Connect command", "about \"<TimeArraySpec>\" see Component Specification. AdditionalData : str or None (default: None) Some", "this option, to have the summary data included in the exported report file.", "logging.info(f'Connected to STK on {self.host}:{self.port}') return True finally: # continue loop if any", "The length of the command type string.''' return int(self.raw[7:9]) @property def async_type(self): '''str", "'''An STK Connect connection class. Attributes: host : str The host on which", "of attempts at connecting to the socket. send_attempts : int Sets the default", "as a tuple (host, port) ack : bool A boolean representing whether the", "is not True: self.send(f'ConControl / AckOff') def _connect(self): attempt = 0 while True:", "AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None): message = f'ReportCreate */{ObjPath} Style \"{Style}\" Type \"Export\" File", "maximum number of attempts at connecting to the socket. Several attempts should be", "return except STKNackError as e: if attempt >= attempts: logging.error(f'send() failed, received NACK", "between 0.000001 and 1000000000.0 seconds. Or enter Bound <Value> to have the report", "None Returns: None Raises: STKConnectError : If, after .connect_attempts attempts, a connection couldn't", "{hdr}{data}') msg_grp[hdr.packet_number-1] = data if msg_grp[-1] == '': del msg_grp[-1] return msg_grp @inherit_docstrings", ".send() before raising STKNackError. 
timeout : int or float (default: 1.0) Sets the", "0.000001 and 1000000000.0 seconds. Or enter Bound <Value> to have the report steps", ": int The maximum number of attempts at connecting to the socket. send_attempts", "a specific time boundary. This value is entered in seconds and must be", "\"{AdditionalData}\"' if Summary is not None: message += f' Summary {Summary}' if AllLines", "connection class. Attributes: host : str The host on which the desired instance", "header = self.socket.recv(40).decode() cmd_name, length = header.rstrip().split() length = int(length) data = self.socket.recv(length).decode()", "summary included with the rest of the report; use the Only value to", "is called. send_attempts : int (default: 1) Sets the default maximum number of", "except ConnectionRefusedError as e: logging.debug(f'ConnectionRefusedError: {e}') else: # exit loop if no exceptions", "mandatory minimum delay before the read() function returns. Returns: bytes : a bytes", "if TimeStep is not None: message += f' TimeStep {TimeStep}' if AdditionalData is", "moment to start self._connect() if type(self) == AsyncConnect: self.send(f'ConControl / AsyncOn') else: self.send(f'ConControl", "to the file to which the report should be written. TimePeriod : str", "Array \"<TimeArraySpec>\" Enter the time step <Value> to be used in creating the", "AsyncOff') if self.ack is not True: self.send(f'ConControl / AckOff') def _connect(self): attempt =", "def connect(self): '''Connect to the STK Connect socket specified. Args: None Returns: None", "type string.''' return int(self.raw[7:9]) @property def async_type(self): '''str : The value of the", "return int(self.raw[34:38]) @property def data_length(self): '''int : The length of the data field", "seconds. 
Or enter Bound <Value> to have the report steps calculated on a", "self.get_ack(message) return except STKNackError as e: if attempt >= attempts: logging.error(f'send() failed, received", "is not None: message += f' TimeStep {TimeStep}' if AdditionalData is not None:", "if attempt >= self.connect_attempts: raise STKConnectError(f'Failed to connect to STK via socket on", "msg_grp[-1] == '': del msg_grp[-1] return msg_grp @inherit_docstrings def report(self, ObjPath, Style, TimePeriod=None,", "None ''' pass @abstractmethod def get_single_message(self): pass @abstractmethod def get_multi_message(self): pass @abstractmethod def", "these options. TimeStep : float or str (default: None) The timestep to use", "a tuple (host, port) ack : bool A boolean representing whether the instance", "If None, then use the default (typically the parent object's time period). Valid", "{self.host}:{self.port}') return True finally: # continue loop if any exception caught if attempt", "use this method directly, as it is called from .send() if the class", "not generally included. 
Use this option, to have the summary data included in", "{self.host}:{self.port}') time.sleep( 3 ) def send(self, message, attempts=None): '''Sends a Connect command via", "self.ack: self.get_ack(message) return except STKNackError as e: if attempt >= attempts: logging.error(f'send() failed,", "raise STKNackError(f'NACK Received: stk.send(\"{message}\")') def get_single_message(self): msg = self.socket.recv(42).decode() hdr = AsyncHeader(msg) pdl", "= [] for i in range(int(data)): sm = self.get_single_message() if len(sm) > 0:", "f' AllLines {AllLines}' self.send(message) buffer = self.read(**kwargs).decode() if len(buffer) == 0: return []", "*\") ''' if attempts is None: attempts = self.send_attempts attempt = 0 while", "self.socket.recv(3).decode() if msg == 'ACK': # logging.debug('ACK Received') return elif msg == 'NAC':", ": str or path-like object (required) The path to the file to which", "message += f' Summary {Summary}' if AllLines is not None: message += f'", "\"{Style}\"' if AccessObjectPath is not None: message += f' AccessObject {AccessObjectPath}' if TimePeriod", "class. Attributes: host : str The host on which the desired instance of", "Object Path for the desired report. e.g. Facility/A_Facility_Name Satellite/A_Satellite_Name Style : str or", "msg_grp[-1] return msg_grp @inherit_docstrings def report(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None,", "report file. 
Valid values: Include Only Specify the Include value to have the", "to start self._connect() if type(self) == AsyncConnect: self.send(f'ConControl / AsyncOn') else: self.send(f'ConControl /", "message += f' AccessObject {AccessObjectPath}' if TimePeriod is not None: message += f'", "AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None, **kwargs): message = f'Report_RM */{ObjPath} Style \"{Style}\"' if AccessObjectPath", "FilePath, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None): message = f'ReportCreate */{ObjPath} Style \"{Style}\"", "successfully.' ''' self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) time.sleep(3) # give STK a moment to", "ACK is received from STK Connect. Users should not typically need to use", "specify an STK interval file for the time period or an Interval or", "x in buffer.split('AGI421009REPORT_RM ')[1:] ] class AsyncHeader(): '''A helper class to read the", "@property def type_length(self): '''int : The length of the command type string.''' return", "pdl = hdr.data_length data = self.socket.recv( pdl ).decode() while len(data) < hdr.data_length: data", "the desired instance of STK is running. port : int The port on", "value is entered in seconds and must be between 0 and 3600 seconds.", "pass @abstractmethod def get_single_message(self): pass @abstractmethod def get_multi_message(self): pass @abstractmethod def report(self, **kwargs):", "return [] class AsyncConnect(_AbstractConnect): @inherit_docstrings def get_ack(self, message): hdr, data = self.get_single_message() if", "address(self): '''The socket address tuple. Args: None Returns: tuple : (host, port) '''", "report. If None, then use the default (typically the parent object's timestep). Valid", "packet_number(self): '''int : The sequence number of the current packet for this identifier.'''", "a mandatory minimum delay before the read() function returns. Returns: bytes : a", "time steps. 
For information about \"<TimeArraySpec>\" see Component Specification. AdditionalData : Some Report", "after .connect_attempts attempts, a connection couldn't be made successfully.' ''' self.socket = socket.socket(socket.AF_INET,", "self.read(**kwargs).decode() if len(buffer) == 0: return [] return [ x[18:] for x in", "this is called. send_attempts : int (default: 1) Sets the default maximum number", "valid {TimeInterval} values see Time Options. Or specify UseAccessTimes to only report data", "True: try: buffer += self.socket.recv(4096) except socket.timeout: logging.debug('Timeout reached, returning buffer') self.socket.settimeout(None) return", "report. This value is entered in seconds and must be between 0.000001 and", "the socket ''' timeout = timeout if timeout is None: timeout = self.timeout", "None (default: None) Sets the timeout period for this specific call to .read()", "'ACK': return True elif hdr.async_type == 'NACK': raise STKNackError(f'NACK Received: stk.send(\"{message}\")') def get_single_message(self):", "*/{ObjPath} Style \"{Style}\"' if AccessObjectPath is not None: message += f' AccessObject {AccessObjectPath}'", "The maximum number of attempts at connecting to the socket. Several attempts should", "on which the desired instance is accepting connections. address : tuple The address", ": float or str (default: None) The timestep to use for the report.", "during access times between the <ObjectPath> and an AccessObject, but you must also", "')[1:] ] class AsyncHeader(): '''A helper class to read the STK Connect Asynchronous", "= int( kwargs.get('send_attempts', 1) ) self.timeout = float( kwargs.get('timeout', 1 ) ) self.socket", "''' if attempts is None: attempts = self.send_attempts attempt = 0 while True:", "or str The timestep to use for the report. If None, then use", "3 ) def send(self, message, attempts=None): '''Sends a Connect command via socket. Args:", "the desired .RST file. 
TimePeriod : str or None (default: None) The time", "{\"<FilePath>\" | \"<IntervalOrListSpec>\"} Enter {TimeInterval} to define the start time and stop time", "TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None): message = f'ReportCreate */{ObjPath} Style \"{Style}\"' if AccessObjectPath", "minimum delay before the read() function returns. ''' self._kwargs = kwargs self.host =", "address tuple. Args: None Returns: tuple : (host, port) ''' return (self.host, self.port)", "(required) The path to the file to which the report should be written.", "Use this option, to have the summary data included in the exported report", "header, data def get_multi_message(self): hdr, data = self.get_single_message() messages = [] for i", "can be found at \"Report Additional Data\" in the STK Help. Summary :", "\"\"\" Created on Tue Aug 4 20:13:37 2020 @author: jolsten \"\"\" import sys,", "@inherit_docstrings def report_rm(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None, **kwargs): message", "likelihood that you receive all the data. However, this also adds a mandatory", "= 0 while True: attempt += 1 try: self._send(message) if self.ack: self.get_ack(message) return", "to have the summary data included in the exported report file. Valid values:", "For information about \"<TimeArraySpec>\" see Component Specification. AdditionalData : str or None (default:", "self.get_message() logging.debug(f'GotMessage: {hdr}{data}') msg_grp[hdr.packet_number-1] = data if msg_grp[-1] == '': del msg_grp[-1] return", "on creating the STK interval file, see Create & Import External Files -", "'''int : The total number of packets in the current identifier.''' return int(self.raw[30:34])", "# exit loop if no exceptions caught logging.info(f'Connected to STK on {self.host}:{self.port}') return", "For valid {TimeInterval} values see Time Options. 
Or specify UseAccessTimes to only report", "option, to have the summary data included in the exported report file. Specify", "STKConnectError(f'Failed to connect to STK via socket on {self.host}:{self.port}') time.sleep( 3 ) def", "received. Returns: None Raises: STKNackError : If too many NACK responses were received", "self.send(f'ConControl / AsyncOff') if self.ack is not True: self.send(f'ConControl / AckOff') def _connect(self):", "must include this option. More information on styles that require AdditionalData can be", "(host, port) ack : bool A boolean representing whether the instance is using", "string.''' return int(self.raw[7:9]) @property def async_type(self): '''str : The value of the command", ".close()''' self.close() def close(self): '''Closes the STK Connect socket. Args: None Returns: None", "path-like object (required) The path to the file to which the report should", "\"Export\" File \"{FilePath}\"' if AccessObjectPath is not None: message += f' AccessObject {AccessObjectPath}'", "entered then the default time step (usually 60 seconds) is used. Or enter", "ack : bool A boolean representing whether the instance is using ACK/NACK. Changing", "the raw values, passed as bytes or str.''' if isinstance(bytestring, bytes): bytestring =", "made, in case the instance of STK hasn't finished initializing by the time", "# give STK a moment to start self._connect() if type(self) == AsyncConnect: self.send(f'ConControl", "if AdditionalData is not None: message += f' AdditionalData \"{AdditionalData}\"' if Summary is", "is unpredictable, increasing the timeout will increase the likelihood that you receive all", "Style \"{Style}\" Type \"Export\" File \"{FilePath}\"' if AccessObjectPath is not None: message +=", "i in range(1,hdr.total_packets): hdr, data = self.get_message() logging.debug(f'GotMessage: {hdr}{data}') msg_grp[hdr.packet_number-1] = data if", "= f'ReportCreate */{ObjPath} Style \"{Style}\"' if AccessObjectPath is not None: message += f'", "file. 
Specify the Include value to have the summary included with the rest", "return self.raw[0:3].decode() @property def header_length(self): '''int : The header_length, should always be 42.'''", "== '': del msg_grp[-1] return msg_grp @inherit_docstrings def report(self, ObjPath, Style, TimePeriod=None, TimeStep=None,", "if attempts is None: attempts = self.send_attempts attempt = 0 while True: attempt", "to which the report should be written. TimePeriod : str or None (default:", "Time Options. Or specify UseAccessTimes to only report data during access times between", "is not None: message += f' AllLines {AllLines}' self.send(message) buffer = self.read(**kwargs).decode() if", "report_rm(self, **kwargs): '''Create a report in STK and return them via socket. Args:", "data is left in the socket...') buffer = b'' while True: try: buffer", "typically need to use this method directly, as it is called from .send()", "if the class attribute ack=True Args: None Returns: None ''' pass @abstractmethod def", "before the read() function returns. Returns: bytes : a bytes object containing the", "should be written. TimePeriod : str or None (default: None) The time period", "default time step (usually 60 seconds) is used. Or enter the Array keyword", "buffer') self.socket.settimeout(None) return buffer def disconnect(self): '''Alias of .close()''' self.close() def close(self): '''Closes", "or None (default: None) Sets the timeout period for this specific call to", "\"<IntervalOrListSpec>\" see Component Specification. See STK Help for more details on these options.", "that you receive all the data. However, this also adds a mandatory minimum", "use the Intervals option to specify an STK interval file for the time", "@author: jolsten \"\"\" import sys, logging import socket import time from abc import", "finished initializing by the time this is called. send_attempts : int (default: 1)", "is using ACK/NACK. 
Changing this after .connect() is called will not change the", "= self.socket.recv(1).decode() msg = msg + k raise STKNackError(f'NACK Received: stk.send(\"{message.rstrip()}\")') else: logging.error(f'Expecting", "pass @abstractmethod def report_rm(self, **kwargs): '''Create a report in STK and return them", "socket specified. Args: None Returns: None Raises: STKConnectError : If, after .connect_attempts attempts,", "command via socket. Args: message: A string containing the STK Connect command attempts:", "data received from the socket ''' timeout = timeout if timeout is None:", "command if a NACK is received. Returns: None Raises: STKNackError : If too", "many NACK responses were received from STK. Examples: s.send(\"Unload / *\") ''' if", "available data from the TCP/IP socket. Args: timeout : int or None (default:", "see Component Specification. See STK Help for more details on these options. TimeStep", "report(self, ObjPath, Style, FilePath, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None): message = f'ReportCreate", "helper class to read the STK Connect Asynchronous Message Format headers.''' def __init__(self,", "Additional Data\" in the STK Help. Summary : str or None (default: None)", "int(self.raw[6].decode()) @property def type_length(self): '''int : The length of the command type string.'''", "& Import External Files - Interval List in STK Help. For information about", "The time period to use for the report. If None, then use the", "For information about \"<TimeArraySpec>\" see Component Specification. AdditionalData : Some Report Styles require", "but you must also specify at least one AccessObject. 
Or use the Intervals", "address as a tuple (host, port) ack : bool A boolean representing whether", "Returns: None Raises: STKNackError : If too many NACK responses were received from", "{TimeStep}' if AdditionalData is not None: message += f' AdditionalData \"{AdditionalData}\"' if Summary", "on which the desired instance of STK is running. port : int The", "timeout will increase the likelihood that you receive all the data. However, this", "array times as time steps. For information about \"<TimeArraySpec>\" see Component Specification. AdditionalData", "= data for i in range(1,hdr.total_packets): hdr, data = self.get_message() logging.debug(f'GotMessage: {hdr}{data}') msg_grp[hdr.packet_number-1]", "included in the exported report file. Valid values: Include Only Specify the Include", "Interval List component specification. For help on creating the STK interval file, see", "get_single_message(self): msg = self.socket.recv(42).decode() hdr = AsyncHeader(msg) pdl = hdr.data_length data = self.socket.recv(", "default maximum number of attempts to make while calling .send() before raising STKNackError.", ": str or None (default: None) The time period to use for the", "else: logging.error(f'Expecting ACK or NACK, got: {msg}{self.socket.recv(2048)}') sys.exit(1) def get_single_message(self): header = self.socket.recv(40).decode()", "@abstractmethod def report_rm(self, **kwargs): '''Create a report in STK and return them via", "number.''' return int(self.raw[5].decode()) @property def minor_version(self): '''int : The minor version number.''' return", "{AllLines}' self.send(message) @inherit_docstrings def report_rm(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None,", ") def read(self, timeout=None): '''Read all available data from the TCP/IP socket. Args:", "associate the correct responses with each other if commands are being processed asynchronously.", "responses with STK Connect. 
Highly recommended to leave this to True. connect_attempts :", "return .join() # buffer = self.read(**kwargs).decode() # if len(buffer) == 0: return []", "summary data included in the exported report file. Valid values: Include Only Specify", "object's timestep). Valid values: <Value> Bound <Value> Array \"<TimeArraySpec>\" Enter the time step", "NACK is received. Returns: None Raises: STKNackError : If too many NACK responses", "True. connect_attempts : int (default: 5) The maximum number of attempts at connecting", "report in STK and return them via socket. Args: ObjPath : str (required)", "None Returns: tuple : (host, port) ''' return (self.host, self.port) def connect(self): '''Connect", "'''int : The major version number.''' return int(self.raw[5].decode()) @property def minor_version(self): '''int :", "total_packets(self): '''int : The total number of packets in the current identifier.''' return", "If 0 is entered then the default time step (usually 60 seconds) is", "AccessObjectPath is not None: message += f' AccessObject {AccessObjectPath}' if TimePeriod is not", "if len(buffer) == 0: return [] # logging.debug(f'Report_RM Returned: {buffer}') # return []", "STKNackError(f'NACK Received: stk.send(\"{message}\")') def get_single_message(self): msg = self.socket.recv(42).decode() hdr = AsyncHeader(msg) pdl =", "str or None (default: None) The time period to use for the report.", "= self.get_single_message() messages = [] for i in range(int(data)): sm = self.get_single_message() if", "AllLines=None, **kwargs): message = f'Report_RM */{ObjPath} Style \"{Style}\"' if AccessObjectPath is not None:", "Enter {TimeInterval} to define the start time and stop time for the report", "AsyncConnect(_AbstractConnect): @inherit_docstrings def get_ack(self, message): hdr, data = self.get_single_message() if hdr.async_type == 'ACK':", ".utils import STK_DATEFMT, inherit_docstrings class _AbstractConnect(metaclass=ABCMeta): '''An STK Connect connection class. 
Attributes: host", "{TimePeriod}' if TimeStep is not None: message += f' TimeStep {TimeStep}' if AdditionalData", "try: self._send(message) if self.ack: self.get_ack(message) return except STKNackError as e: if attempt >=", "you must include this option. More information on styles that require AdditionalData can", "{AllLines}' self.send(message) buffer = self.read(**kwargs).decode() if len(buffer) == 0: return [] return [", "Received: stk.send(\"{message}\")') def get_single_message(self): msg = self.socket.recv(42).decode() hdr = AsyncHeader(msg) pdl = hdr.data_length", ": str or None (default: None) Summary data is not generally included. Use", "STKConnectError : If, after .connect_attempts attempts, a connection couldn't be made successfully.' '''", "time steps. For information about \"<TimeArraySpec>\" see Component Specification. AdditionalData : str or", "bytestring def __repr__(self): return f'<{self.raw}>' @property def sync(self): '''str : The sync word,", "have the summary data included in the exported report file. Valid values: Include", "Because network traffic is unpredictable, increasing the timeout will increase the likelihood that", "None Raises: STKNackError : If too many NACK responses were received from STK.", "A boolean representing whether the instance is using ACK/NACK. Changing this after .connect()", "Data\" in the STK Help. Summary : str Valid values: Include Only Summary", "exported report file. 
Valid values: Include Only Specify the Include value to have", "def get_multi_message(self): logging.debug('Getting Message Block:') hdr, data = self.get_single_message() logging.debug(f'GotMessage: {hdr}{data}') msg_grp =", ": The minor version number.''' return int(self.raw[6].decode()) @property def type_length(self): '''int : The", "''' def __init__(self, **kwargs): '''Inits an STK connection object (Connect or AsyncConnect) Args:", "Received: stk.send(\"{message.rstrip()}\")') else: logging.error(f'Expecting ACK or NACK, got: {msg}{self.socket.recv(2048)}') sys.exit(1) def get_single_message(self): header", "asynchronously. ''' return int(self.raw[24:30]) @property def total_packets(self): '''int : The total number of", "'''int : The value of the response ID. This should be used to", "+= f' AdditionalData \"{AdditionalData}\"' if Summary is not None: message += f' Summary", "Sets the default maximum number of attempts to make while calling .send() before", "for this identifier.''' return int(self.raw[34:38]) @property def data_length(self): '''int : The length of", "f' TimeStep {TimeStep}' if AdditionalData is not None: message += f' AdditionalData \"{AdditionalData}\"'", "-*- \"\"\" Created on Tue Aug 4 20:13:37 2020 @author: jolsten \"\"\" import", ": The length of the command type string.''' return int(self.raw[7:9]) @property def async_type(self):", "Aug 4 20:13:37 2020 @author: jolsten \"\"\" import sys, logging import socket import", "f'ReportCreate */{ObjPath} Style \"{Style}\"' if AccessObjectPath is not None: message += f' AccessObject", "pdl - len(data) ).decode() return hdr, data def get_multi_message(self): logging.debug('Getting Message Block:') hdr,", "_send(self, message: str): logging.debug(f'stk.send(\"{message}\")') self.socket.send( (message+'\\n').encode() ) def read(self, timeout=None): '''Read all available", "the data received from the socket ''' timeout = timeout if timeout is", "return them via socket. 
Args: ObjPath : str (required) The STK Object Path", "abstractmethod from .exceptions import * from .utils import STK_DATEFMT, inherit_docstrings class _AbstractConnect(metaclass=ABCMeta): '''An", "(default: None) The timestep to use for the report. If None, then use", "returning buffer') self.socket.settimeout(None) return buffer def disconnect(self): '''Alias of .close()''' self.close() def close(self):", "between the <ObjectPath> and an AccessObject, but you must also specify at least", "= self.get_single_message() logging.debug(f'GotMessage: {hdr}{data}') msg_grp = [None] * hdr.total_packets msg_grp[hdr.packet_number-1] = data for", "is entered in seconds and must be between 0 and 3600 seconds. If", "0: return [] # logging.debug(f'Report_RM Returned: {buffer}') # return [] class AsyncConnect(_AbstractConnect): @inherit_docstrings", "the Intervals option to specify an STK interval file for the time period", "timeout period for calls to .read() before assuming all data was received. '''", "types of reports you must include this option. More information on styles that", "STK_DATEFMT, inherit_docstrings class _AbstractConnect(metaclass=ABCMeta): '''An STK Connect connection class. Attributes: host : str", "was received. ''' def __init__(self, **kwargs): '''Inits an STK connection object (Connect or", "is entered in seconds and must be between 0.000001 and 1000000000.0 seconds. Or", "written. TimePeriod : str or None (default: None) The time period to use", "import sys, logging import socket import time from abc import ABCMeta, abstractmethod from", "all data was received. 
Because network traffic is unpredictable, increasing the timeout will", "the rest of the report; use the Only value to have only the", "None: message += f' TimeStep {TimeStep}' if AdditionalData is not None: message +=", "{Summary}' if AllLines is not None: message += f' AllLines {AllLines}' self.send(message) @inherit_docstrings", "self.send_attempts = int( kwargs.get('send_attempts', 1) ) self.timeout = float( kwargs.get('timeout', 1 ) )", "str or None (default: None) Summary data is not generally included. Use this", "x[18:] for x in buffer.split('AGI421009REPORT_RM ')[1:] ] class AsyncHeader(): '''A helper class to", "or None (default: None) Some Report Styles require additional or pre-data, such as", "port) ''' return (self.host, self.port) def connect(self): '''Connect to the STK Connect socket", "self.send(f'ConControl / AsyncOn') else: self.send(f'ConControl / AsyncOff') if self.ack is not True: self.send(f'ConControl", "self.send(message) messages = self.get_multi_message() return [x[1] for x in messages] # report =", "None: message += f' AllLines {AllLines}' self.send(message) messages = self.get_multi_message() return [x[1] for", "message = f'ReportCreate */{ObjPath} Style \"{Style}\" Type \"Export\" File \"{FilePath}\"' if AccessObjectPath is", "msg_grp = [None] * hdr.total_packets msg_grp[hdr.packet_number-1] = data for i in range(1,hdr.total_packets): hdr,", "bool (default: True) Specifies whether or not to use ACK/NACK responses with STK", "[] class AsyncConnect(_AbstractConnect): @inherit_docstrings def get_ack(self, message): hdr, data = self.get_single_message() if hdr.async_type", "a path to the desired .RST file. FilePath : str or path-like object", ") self.port = int( kwargs.get('port', 5001) ) self.ack = bool( kwargs.get('ack', True) )", "{hdr}{data}') msg_grp = [None] * hdr.total_packets msg_grp[hdr.packet_number-1] = data for i in range(1,hdr.total_packets):", "connections. 
address : tuple The address as a tuple (host, port) ack :", "socket.SOCK_STREAM) time.sleep(3) # give STK a moment to start self._connect() if type(self) ==", "left in the socket...') buffer = b'' while True: try: buffer += self.socket.recv(4096)", "Returned: {buffer}') # return [] class AsyncConnect(_AbstractConnect): @inherit_docstrings def get_ack(self, message): hdr, data", "if self.ack is not True: self.send(f'ConControl / AckOff') def _connect(self): attempt = 0", "the default time step (usually 60 seconds) is used. Or enter the Array", "data for i in range(1,hdr.total_packets): hdr, data = self.get_message() logging.debug(f'GotMessage: {hdr}{data}') msg_grp[hdr.packet_number-1] =", "in range(1,hdr.total_packets): hdr, data = self.get_message() logging.debug(f'GotMessage: {hdr}{data}') msg_grp[hdr.packet_number-1] = data if msg_grp[-1]", "logging.debug(f'GotMessage: {hdr}{data}') msg_grp = [None] * hdr.total_packets msg_grp[hdr.packet_number-1] = data for i in", "Message Block:') hdr, data = self.get_single_message() logging.debug(f'GotMessage: {hdr}{data}') msg_grp = [None] * hdr.total_packets", "Specification. See STK Help for more details on these options. TimeStep : float", "Style : str or path-like object (required) The Style name, if it is", "not None: message += f' AllLines {AllLines}' self.send(message) buffer = self.read(**kwargs).decode() if len(buffer)", "+= 1 try: self._send(message) if self.ack: self.get_ack(message) return except STKNackError as e: if", "component specification to use the array times as time steps. For information about", "the summary data included in the exported report file. Specify the Include value", "Component Specification. 
AdditionalData : str or None (default: None) Some Report Styles require", "b'' while True: try: buffer += self.socket.recv(4096) except socket.timeout: logging.debug('Timeout reached, returning buffer')", "'''str : The value of the command type string.''' return (self.raw[9:24])[0:self.type_length] @property def", "require additional or pre-data, such as a comparison object for the RIC report", "is not None: message += f' AdditionalData \"{AdditionalData}\"' if Summary is not None:", "span. For valid {TimeInterval} values see Time Options. Or specify UseAccessTimes to only", "Component Specification. AdditionalData : Some Report Styles require additional or pre-data, such as", "True: attempt += 1 try: self.socket.connect(self.address) except ConnectionRefusedError as e: logging.debug(f'ConnectionRefusedError: {e}') else:", "to .read() before assuming all data was received. Because network traffic is unpredictable,", ".exceptions import * from .utils import STK_DATEFMT, inherit_docstrings class _AbstractConnect(metaclass=ABCMeta): '''An STK Connect", "== AsyncConnect: self.send(f'ConControl / AsyncOn') else: self.send(f'ConControl / AsyncOff') if self.ack is not", "'''Closes the STK Connect socket. Args: None Returns: None ''' try: self.socket.close() except:", "specific call to .read() before assuming all data was received. Because network traffic", "into STK (or is a default report style). Otherwise, pass a path to", "True) ) self.connect_attempts = int( kwargs.get('connect_attempts', 5) ) self.send_attempts = int( kwargs.get('send_attempts', 1)", "''' return (self.host, self.port) def connect(self): '''Connect to the STK Connect socket specified.", "exported report file. Specify the Include value to have the summary included with", "AllLines is not None: message += f' AllLines {AllLines}' self.send(message) @inherit_docstrings def report_rm(self,", "the timeout period for this specific call to .read() before assuming all data", "specify at least one AccessObject. 
Or use the Intervals option to specify an", "'''int : The sequence number of the current packet for this identifier.''' return", "received. Because network traffic is unpredictable, increasing the timeout will increase the likelihood", "additional or pre-data, such as a comparison object for the RIC report for", "disconnect(self): '''Alias of .close()''' self.close() def close(self): '''Closes the STK Connect socket. Args:", "the time step <Value> to be used in creating the report. This value", "reported. Returns: None ''' pass @abstractmethod def report_rm(self, **kwargs): '''Create a report in", "str): logging.debug(f'stk.send(\"{message}\")') self.socket.send( (message+'\\n').encode() ) def read(self, timeout=None): '''Read all available data from", "option, to have the summary data included in the exported report file. Valid", "0 while True: attempt += 1 try: self.socket.connect(self.address) except ConnectionRefusedError as e: logging.debug(f'ConnectionRefusedError:", "report span. For valid {TimeInterval} values see Time Options. Or specify UseAccessTimes to", "times to send the command if a NACK is received. Returns: None Raises:", "Intervals {\"<FilePath>\" | \"<IntervalOrListSpec>\"} Enter {TimeInterval} to define the start time and stop", "+= f' AllLines {AllLines}' self.send(message) messages = self.get_multi_message() return [x[1] for x in", "None @property def address(self): '''The socket address tuple. Args: None Returns: tuple :", "period or an Interval or Interval List component specification. 
For help on creating", "msg in messages: # report += # return .join() # buffer = self.read(**kwargs).decode()", "Sets the timeout period for this specific call to .read() before assuming all", "if AllLines is not None: message += f' AllLines {AllLines}' self.send(message) buffer =", "get_single_message(self): pass @abstractmethod def get_multi_message(self): pass @abstractmethod def report(self, **kwargs): '''Create a report", "the command type string.''' return int(self.raw[7:9]) @property def async_type(self): '''str : The value", "pre-data, such as a comparison object for the RIC report for a Satellite.", "STKNackError(e) def _send(self, message: str): logging.debug(f'stk.send(\"{message}\")') self.socket.send( (message+'\\n').encode() ) def read(self, timeout=None): '''Read", ": str (default: 'localhost') port : int (default: 5001) ack : bool (default:", "logging.debug('ACK Received') return elif msg == 'NAC': k = self.socket.recv(1).decode() msg = msg", "hdr, data = self.get_single_message() messages = [] for i in range(int(data)): sm =", "creating the report. This value is entered in seconds and must be between", "in the STK Help. Summary : str Valid values: Include Only Summary data", "always be \"AGI\"''' return self.raw[0:3].decode() @property def header_length(self): '''int : The header_length, should", "adds a mandatory minimum delay before the read() function returns. ''' self._kwargs =", "STKNackError as e: if attempt >= attempts: logging.error(f'send() failed, received NACK too many", "def __del__(self): self.close() @abstractmethod def get_ack(self, message): '''Block until an ACK is received", "import time from abc import ABCMeta, abstractmethod from .exceptions import * from .utils", "hdr = AsyncHeader(msg) pdl = hdr.data_length data = self.socket.recv( pdl ).decode() while len(data)", "str (default: 'localhost') port : int (default: 5001) ack : bool (default: True)", "Raises: STKNackError : If too many NACK responses were received from STK. 
Examples:", "made successfully.' ''' self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) time.sleep(3) # give STK a moment", "None (default: None) Some Report Styles require additional or pre-data, such as a", "is not None: message += f' AllLines {AllLines}' self.send(message) @inherit_docstrings def report_rm(self, ObjPath,", "data def get_multi_message(self): logging.debug('Getting Message Block:') hdr, data = self.get_single_message() logging.debug(f'GotMessage: {hdr}{data}') msg_grp", "ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None, **kwargs): message = f'Report_RM */{ObjPath}", "be made, in case the instance of STK hasn't finished initializing by the", ": str Valid values: Include Only Summary data is not generally included. Use", "if a NACK is received. Returns: None Raises: STKNackError : If too many", "AdditionalData \"{AdditionalData}\"' if Summary is not None: message += f' Summary {Summary}' if", "is received. Returns: None Raises: STKNackError : If too many NACK responses were", "return [] # logging.debug(f'Report_RM Returned: {buffer}') # return [] class AsyncConnect(_AbstractConnect): @inherit_docstrings def", "def __init__(self, bytestring): '''Inits a new object using the raw values, passed as", "def minor_version(self): '''int : The minor version number.''' return int(self.raw[6].decode()) @property def type_length(self):", "you receive all the data. However, this also adds a mandatory minimum delay", "for i in range(int(data)): sm = self.get_single_message() if len(sm) > 0: messages.append(sm) return", "if any exception caught if attempt >= self.connect_attempts: raise STKConnectError(f'Failed to connect to", "is a default report style). 
Otherwise, pass a path to the desired .RST", "return (self.raw[9:24])[0:self.type_length] @property def identifier(self): '''int : The value of the response ID.", "Specifies whether or not to use ACK/NACK responses with STK Connect. Highly recommended", "hdr.async_type == 'ACK': return True elif hdr.async_type == 'NACK': raise STKNackError(f'NACK Received: stk.send(\"{message}\")')", "@inherit_docstrings def get_ack(self, message): msg = self.socket.recv(3).decode() if msg == 'ACK': # logging.debug('ACK", "+= 1 try: self.socket.connect(self.address) except ConnectionRefusedError as e: logging.debug(f'ConnectionRefusedError: {e}') else: # exit", "the summary data included in the exported report file. Valid values: Include Only", "the STK Connect socket. Args: None Returns: None ''' try: self.socket.close() except: pass", "a Connect command via socket. Args: message: A string containing the STK Connect", "in messages: # report += # return .join() # buffer = self.read(**kwargs).decode() #", "def report_rm(self, **kwargs): '''Create a report in STK and return them via socket.", "= msg + k raise STKNackError(f'NACK Received: stk.send(\"{message.rstrip()}\")') else: logging.error(f'Expecting ACK or NACK,", "correct responses with each other if commands are being processed asynchronously. ''' return", "information about \"<IntervalOrListSpec>\" see Component Specification. See STK Help for more details on", "\"<IntervalOrListSpec>\"} Enter {TimeInterval} to define the start time and stop time for the", "# return [] class AsyncConnect(_AbstractConnect): @inherit_docstrings def get_ack(self, message): hdr, data = self.get_single_message()", "Only value to have only the summary data reported. 
Returns: None ''' pass", "socket.socket(socket.AF_INET, socket.SOCK_STREAM) time.sleep(3) # give STK a moment to start self._connect() if type(self)", "self.ack = bool( kwargs.get('ack', True) ) self.connect_attempts = int( kwargs.get('connect_attempts', 5) ) self.send_attempts", "@property def sync(self): '''str : The sync word, should always be \"AGI\"''' return", "of the command type string.''' return (self.raw[9:24])[0:self.type_length] @property def identifier(self): '''int : The", "AccessObject. Or use the Intervals option to specify an STK interval file for", "message): hdr, data = self.get_single_message() if hdr.async_type == 'ACK': return True elif hdr.async_type", "not typically need to use this method directly, as it is called from", "timeout : float Sets the default timeout period for calls to .read() before", "to the socket. send_attempts : int Sets the default maximum number of attempts", "socket...') buffer = b'' while True: try: buffer += self.socket.recv(4096) except socket.timeout: logging.debug('Timeout", "is left in the socket...') buffer = b'' while True: try: buffer +=", "number of the current packet for this identifier.''' return int(self.raw[34:38]) @property def data_length(self):", "always be 42.''' return int(self.raw[3:5].decode()) @property def version(self): '''str : The version in", "to the socket. Several attempts should be made, in case the instance of", "is running. port : int The port on which the desired instance is", "f' TimePeriod {TimePeriod}' if TimeStep is not None: message += f' TimeStep {TimeStep}'", "raising STKNackError. 
timeout : int or float (default: 1.0) Sets the default timeout", "self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) time.sleep(3) # give STK a moment to start self._connect()", "Include Only Specify the Include value to have the summary included with the", "The maximum number of times to send the command if a NACK is", ": tuple The address as a tuple (host, port) ack : bool A", "AllLines is not None: message += f' AllLines {AllLines}' self.send(message) buffer = self.read(**kwargs).decode()", "def type_length(self): '''int : The length of the command type string.''' return int(self.raw[7:9])", "not None: message += f' AllLines {AllLines}' self.send(message) @inherit_docstrings def report_rm(self, ObjPath, Style,", "file for the time period or an Interval or Interval List component specification.", ": int or float (default: 1.0) Sets the default timeout period for calls", "read(self, timeout=None): '''Read all available data from the TCP/IP socket. Args: timeout :", "have only the summary data reported. Returns: None ''' pass class Connect(_AbstractConnect): @inherit_docstrings", "while True: try: buffer += self.socket.recv(4096) except socket.timeout: logging.debug('Timeout reached, returning buffer') self.socket.settimeout(None)", "class _AbstractConnect(metaclass=ABCMeta): '''An STK Connect connection class. Attributes: host : str The host", "mandatory minimum delay before the read() function returns. 
''' self._kwargs = kwargs self.host", "as e: logging.debug(f'ConnectionRefusedError: {e}') else: # exit loop if no exceptions caught logging.info(f'Connected", ": The version in major.minor format.''' return f'{self.major_version}.{self.minor_version}' @property def major_version(self): '''int :", "report += # return .join() # buffer = self.read(**kwargs).decode() # if len(buffer) ==", "+ k raise STKNackError(f'NACK Received: stk.send(\"{message.rstrip()}\")') else: logging.error(f'Expecting ACK or NACK, got: {msg}{self.socket.recv(2048)}')", "included in the exported report file. Specify the Include value to have the", "will increase the likelihood that you receive all the data. However, this also", "{Summary}' if AllLines is not None: message += f' AllLines {AllLines}' self.send(message) messages", "self.socket.setblocking(False) self.socket.settimeout(timeout) logging.debug('Reading until no data is left in the socket...') buffer =", "Connect connection class. Attributes: host : str The host on which the desired", "before raising STKNackError. timeout : int or float (default: 1.0) Sets the default", "current packet for this identifier.''' return int(self.raw[34:38]) @property def data_length(self): '''int : The", "The STK Object Path for the desired report. e.g. Facility/A_Facility_Name Satellite/A_Satellite_Name Style :", "and stop time for the report span. For valid {TimeInterval} values see Time", "while calling .send() before raising STKNackError. timeout : float Sets the default timeout", "entered in seconds and must be between 0 and 3600 seconds. If 0", "+= self.socket.recv( pdl - len(data) ).decode() return hdr, data def get_multi_message(self): logging.debug('Getting Message", "len(buffer) == 0: return [] return [ x[18:] for x in buffer.split('AGI421009REPORT_RM ')[1:]", "\"<TimeArraySpec>\" see Component Specification. 
AdditionalData : Some Report Styles require additional or pre-data,", "buffer += self.socket.recv(4096) except socket.timeout: logging.debug('Timeout reached, returning buffer') self.socket.settimeout(None) return buffer def", "Block:') hdr, data = self.get_single_message() logging.debug(f'GotMessage: {hdr}{data}') msg_grp = [None] * hdr.total_packets msg_grp[hdr.packet_number-1]", "representing whether the instance is using ACK/NACK. Changing this after .connect() is called", "Created on Tue Aug 4 20:13:37 2020 @author: jolsten \"\"\" import sys, logging", "str Valid values: Include Only Summary data is not generally included. Use this", "= f'Report_RM */{ObjPath} Style \"{Style}\"' if AccessObjectPath is not None: message += f'", ") self.ack = bool( kwargs.get('ack', True) ) self.connect_attempts = int( kwargs.get('connect_attempts', 5) )", "None) Summary data is not generally included. Use this option, to have the", "need to use this method directly, as it is called from .send() if", "report(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None): message = f'ReportCreate */{ObjPath}", "in messages] # report = '' # for msg in messages: # report", "= int( kwargs.get('port', 5001) ) self.ack = bool( kwargs.get('ack', True) ) self.connect_attempts =", "self.socket.recv(1).decode() msg = msg + k raise STKNackError(f'NACK Received: stk.send(\"{message.rstrip()}\")') else: logging.error(f'Expecting ACK", "buffer def disconnect(self): '''Alias of .close()''' self.close() def close(self): '''Closes the STK Connect", "the desired report. e.g. Facility/A_Facility_Name Satellite/A_Satellite_Name Style : str or path-like object (required)", "time and stop time for the report span. For valid {TimeInterval} values see", "ID. 
This should be used to associate the correct responses with each other", "def __init__(self, **kwargs): '''Inits an STK connection object (Connect or AsyncConnect) Args: host", "return [] return [ x[18:] for x in buffer.split('AGI421009REPORT_RM ')[1:] ] class AsyncHeader():", "+= f' TimePeriod {TimePeriod}' if TimeStep is not None: message += f' TimeStep", "socket ''' timeout = timeout if timeout is None: timeout = self.timeout self.socket.setblocking(False)", "make while calling .send() before raising STKNackError. timeout : int or float (default:", "self.socket.recv(40).decode() cmd_name, length = header.rstrip().split() length = int(length) data = self.socket.recv(length).decode() return header,", "data def get_multi_message(self): hdr, data = self.get_single_message() messages = [] for i in", "{Summary}' if AllLines is not None: message += f' AllLines {AllLines}' self.send(message) buffer", "call to .read() before assuming all data was received. Because network traffic is", "\"<TimeArraySpec>\" see Component Specification. AdditionalData : str or None (default: None) Some Report", "such as a comparison object for the RIC report for a Satellite. For", "this after .connect() is called will not change the mode. connect_attempts : int", "if timeout is None: timeout = self.timeout self.socket.setblocking(False) self.socket.settimeout(timeout) logging.debug('Reading until no data", "a Time Array component specification to use the array times as time steps.", "a mandatory minimum delay before the read() function returns. ''' self._kwargs = kwargs", "raise STKNackError(e) def _send(self, message: str): logging.debug(f'stk.send(\"{message}\")') self.socket.send( (message+'\\n').encode() ) def read(self, timeout=None):", "self.socket.send( (message+'\\n').encode() ) def read(self, timeout=None): '''Read all available data from the TCP/IP", "steps calculated on a specific time boundary. 
This value is entered in seconds", "about \"<TimeArraySpec>\" see Component Specification. AdditionalData : Some Report Styles require additional or", "the report should be written. TimePeriod : str or None (default: None) The", "new object using the raw values, passed as bytes or str.''' if isinstance(bytestring,", "of the command type string.''' return int(self.raw[7:9]) @property def async_type(self): '''str : The", ".RST file. TimePeriod : str or None (default: None) The time period to", "function returns. ''' self._kwargs = kwargs self.host = str( kwargs.get('host', 'localhost') ) self.port", "called from .send() if the class attribute ack=True Args: None Returns: None '''", "response ID. This should be used to associate the correct responses with each", "timeout if timeout is None: timeout = self.timeout self.socket.setblocking(False) self.socket.settimeout(timeout) logging.debug('Reading until no", "AllLines=None): message = f'ReportCreate */{ObjPath} Style \"{Style}\"' if AccessObjectPath is not None: message", "'''str : The version in major.minor format.''' return f'{self.major_version}.{self.minor_version}' @property def major_version(self): '''int", "seconds) is used. Or enter the Array keyword with a Time Array component", "Array keyword with a Time Array component specification to use the array times", "each other if commands are being processed asynchronously. ''' return int(self.raw[24:30]) @property def", ": bool (default: True) Specifies whether or not to use ACK/NACK responses with", "== 'ACK': # logging.debug('ACK Received') return elif msg == 'NAC': k = self.socket.recv(1).decode()", "report in STK and save it to a file. Args: ObjPath : str", "messages] # report = '' # for msg in messages: # report +=", "STK Connect Asynchronous Message Format headers.''' def __init__(self, bytestring): '''Inits a new object", "calculated on a specific time boundary. This value is entered in seconds and", "make while calling .send() before raising STKNackError. 
timeout : float Sets the default", "STK connection object (Connect or AsyncConnect) Args: host : str (default: 'localhost') port", "logging.debug('Reading until no data is left in the socket...') buffer = b'' while", "None (default: None) Summary data is not generally included. Use this option, to", "Help. Summary : str Valid values: Include Only Summary data is not generally", "Time Array component specification to use the array times as time steps. For", "value of the response ID. This should be used to associate the correct", "= str( kwargs.get('host', 'localhost') ) self.port = int( kwargs.get('port', 5001) ) self.ack =", "= None @property def address(self): '''The socket address tuple. Args: None Returns: tuple", "default (typically the parent object's timestep). Valid values: <Value> Bound <Value> Array \"<TimeArraySpec>\"", "Connect. Users should not typically need to use this method directly, as it", "@property def async_type(self): '''str : The value of the command type string.''' return", "None) Some Report Styles require additional or pre-data, such as a comparison object", "the exported report file. Specify the Include value to have the summary included", "<Value> Array \"<TimeArraySpec>\" Enter the time step <Value> to be used in creating", "Options. Or specify UseAccessTimes to only report data during access times between the", "on Tue Aug 4 20:13:37 2020 @author: jolsten \"\"\" import sys, logging import", "by the time this is called. send_attempts : int (default: 1) Sets the", "reports you must include this option. More information on styles that require AdditionalData", "# if len(buffer) == 0: return [] # logging.debug(f'Report_RM Returned: {buffer}') # return", "generally included. Use this option, to have the summary data included in the", "*/{ObjPath} Style \"{Style}\" Type \"Export\" File \"{FilePath}\"' if AccessObjectPath is not None: message", "@property def address(self): '''The socket address tuple. 
Args: None Returns: tuple : (host,", "too many NACK responses were received from STK. Examples: s.send(\"Unload / *\") '''", "returns. Returns: bytes : a bytes object containing the data received from the", "timeout period for this specific call to .read() before assuming all data was", "message += f' AllLines {AllLines}' self.send(message) messages = self.get_multi_message() return [x[1] for x", "msg == 'NAC': k = self.socket.recv(1).decode() msg = msg + k raise STKNackError(f'NACK", "STK Connect. Users should not typically need to use this method directly, as", "k raise STKNackError(f'NACK Received: stk.send(\"{message.rstrip()}\")') else: logging.error(f'Expecting ACK or NACK, got: {msg}{self.socket.recv(2048)}') sys.exit(1)", "= self.socket.recv(length).decode() return header, data def get_multi_message(self): hdr, data = self.get_single_message() messages =", "2020 @author: jolsten \"\"\" import sys, logging import socket import time from abc", "values: <Value> Bound <Value> Array \"<TimeArraySpec>\" Enter the time step <Value> to be", "# buffer = self.read(**kwargs).decode() # if len(buffer) == 0: return [] # logging.debug(f'Report_RM", "the exported report file. Valid values: Include Only Specify the Include value to", "See STK Help for more details on these options. TimeStep : float or", "Array component specification to use the array times as time steps. For information", "to have the summary data included in the exported report file. Specify the", "it is called from .send() if the class attribute ack=True Args: None Returns:", "object using the raw values, passed as bytes or str.''' if isinstance(bytestring, bytes):", "Received') return elif msg == 'NAC': k = self.socket.recv(1).decode() msg = msg +", "for the report span. For valid {TimeInterval} values see Time Options. Or specify", "'''Create a report in STK and save it to a file. 
Args: ObjPath", "Style \"{Style}\"' if AccessObjectPath is not None: message += f' AccessObject {AccessObjectPath}' if", "accepting connections. address : tuple The address as a tuple (host, port) ack", ") self.socket = None @property def address(self): '''The socket address tuple. Args: None", "for the desired report. e.g. Facility/A_Facility_Name Satellite/A_Satellite_Name Style : str or path-like object", "sequence number of the current packet for this identifier.''' return int(self.raw[34:38]) @property def", "= self.get_message() logging.debug(f'GotMessage: {hdr}{data}') msg_grp[hdr.packet_number-1] = data if msg_grp[-1] == '': del msg_grp[-1]", "More information on styles that require AdditionalData can be found at \"Report Additional", "NACK, got: {msg}{self.socket.recv(2048)}') sys.exit(1) def get_single_message(self): header = self.socket.recv(40).decode() cmd_name, length = header.rstrip().split()", "'NACK': raise STKNackError(f'NACK Received: stk.send(\"{message}\")') def get_single_message(self): msg = self.socket.recv(42).decode() hdr = AsyncHeader(msg)", "Format headers.''' def __init__(self, bytestring): '''Inits a new object using the raw values,", "in seconds and must be between 0.000001 and 1000000000.0 seconds. 
Or enter Bound", "== 'NAC': k = self.socket.recv(1).decode() msg = msg + k raise STKNackError(f'NACK Received:", "5001) ) self.ack = bool( kwargs.get('ack', True) ) self.connect_attempts = int( kwargs.get('connect_attempts', 5)", "TimeStep {TimeStep}' if AdditionalData is not None: message += f' AdditionalData \"{AdditionalData}\"' if", "via socket on {self.host}:{self.port}') time.sleep( 3 ) def send(self, message, attempts=None): '''Sends a", "logging import socket import time from abc import ABCMeta, abstractmethod from .exceptions import", "time.sleep(3) # give STK a moment to start self._connect() if type(self) == AsyncConnect:", ": (host, port) ''' return (self.host, self.port) def connect(self): '''Connect to the STK", "# return .join() # buffer = self.read(**kwargs).decode() # if len(buffer) == 0: return", "(default: None) Sets the timeout period for this specific call to .read() before", "be \"AGI\"''' return self.raw[0:3].decode() @property def header_length(self): '''int : The header_length, should always", "42.''' return int(self.raw[3:5].decode()) @property def version(self): '''str : The version in major.minor format.'''", "inherit_docstrings class _AbstractConnect(metaclass=ABCMeta): '''An STK Connect connection class. Attributes: host : str The", "@property def identifier(self): '''int : The value of the response ID. This should", "float( kwargs.get('timeout', 1 ) ) self.socket = None @property def address(self): '''The socket", "= AsyncHeader(msg) pdl = hdr.data_length data = self.socket.recv( pdl ).decode() while len(data) <", "connecting to the socket. Several attempts should be made, in case the instance", "data during access times between the <ObjectPath> and an AccessObject, but you must", "None: message += f' AllLines {AllLines}' self.send(message) @inherit_docstrings def report_rm(self, ObjPath, Style, TimePeriod=None,", "for the time period or an Interval or Interval List component specification. 
For", "object (required) The Style name, if it is already loaded into STK (or", "save it to a file. Args: ObjPath : str (required) The STK Object", "object (required) The path to the file to which the report should be", "socket address tuple. Args: None Returns: tuple : (host, port) ''' return (self.host,", "STK Object Path for the desired report. e.g. Facility/A_Facility_Name Satellite/A_Satellite_Name Style : str", "1 try: self._send(message) if self.ack: self.get_ack(message) return except STKNackError as e: if attempt", "''' try: self.socket.close() except: pass def __repr__(self): return f'{type(self).__name__}({self.host}:{self.port})' def __del__(self): self.close() @abstractmethod", "== 'NACK': raise STKNackError(f'NACK Received: stk.send(\"{message}\")') def get_single_message(self): msg = self.socket.recv(42).decode() hdr =", "def close(self): '''Closes the STK Connect socket. Args: None Returns: None ''' try:", "TimeStep : float or str The timestep to use for the report. If", "self.get_single_message() if len(sm) > 0: messages.append(sm) return messages @inherit_docstrings def report(self, ObjPath, Style,", ") def send(self, message, attempts=None): '''Sends a Connect command via socket. Args: message:", "The maximum number of attempts at connecting to the socket. send_attempts : int", "steps. For information about \"<TimeArraySpec>\" see Component Specification. AdditionalData : str or None", "failed, received NACK too many times') raise STKNackError(e) def _send(self, message: str): logging.debug(f'stk.send(\"{message}\")')", "the timeout will increase the likelihood that you receive all the data. However,", "e: logging.debug(f'ConnectionRefusedError: {e}') else: # exit loop if no exceptions caught logging.info(f'Connected to", "if AllLines is not None: message += f' AllLines {AllLines}' self.send(message) messages =", "the read() function returns. 
''' self._kwargs = kwargs self.host = str( kwargs.get('host', 'localhost')", "return header, data def get_multi_message(self): hdr, data = self.get_single_message() messages = [] for", "pass a path to the desired .RST file. FilePath : str or path-like", "Only Specify the Include value to have the summary included with the rest", "specify UseAccessTimes to only report data during access times between the <ObjectPath> and", "\"\"\" import sys, logging import socket import time from abc import ABCMeta, abstractmethod", "def get_ack(self, message): '''Block until an ACK is received from STK Connect. Users", "tuple : (host, port) ''' return (self.host, self.port) def connect(self): '''Connect to the", "len(data) < hdr.data_length: data += self.socket.recv( pdl - len(data) ).decode() return hdr, data", "not change the mode. connect_attempts : int The maximum number of attempts at", "self.socket.close() except: pass def __repr__(self): return f'{type(self).__name__}({self.host}:{self.port})' def __del__(self): self.close() @abstractmethod def get_ack(self,", "return int(self.raw[24:30]) @property def total_packets(self): '''int : The total number of packets in", "to leave this to True. connect_attempts : int (default: 5) The maximum number", "containing the STK Connect command attempts: Optional; The maximum number of times to", "= self.timeout self.socket.setblocking(False) self.socket.settimeout(timeout) logging.debug('Reading until no data is left in the socket...')", "Optional; The maximum number of times to send the command if a NACK", "steps. For information about \"<TimeArraySpec>\" see Component Specification. AdditionalData : Some Report Styles", "maximum number of attempts at connecting to the socket. send_attempts : int Sets", "assuming all data was received. 
''' def __init__(self, **kwargs): '''Inits an STK connection", "AllLines {AllLines}' self.send(message) messages = self.get_multi_message() return [x[1] for x in messages] #", "f'{self.major_version}.{self.minor_version}' @property def major_version(self): '''int : The major version number.''' return int(self.raw[5].decode()) @property", "loop if any exception caught if attempt >= self.connect_attempts: raise STKConnectError(f'Failed to connect", "at connecting to the socket. Several attempts should be made, in case the", "[None] * hdr.total_packets msg_grp[hdr.packet_number-1] = data for i in range(1,hdr.total_packets): hdr, data =", "self.timeout self.socket.setblocking(False) self.socket.settimeout(timeout) logging.debug('Reading until no data is left in the socket...') buffer", "Otherwise, pass a path to the desired .RST file. FilePath : str or", "None, then use the default (typically the parent object's timestep). Valid values: <Value>", "STK Help. For information about \"<IntervalOrListSpec>\" see Component Specification. See STK Help for", "to have only the summary data reported. Returns: None ''' pass class Connect(_AbstractConnect):", ": str (required) The STK Object Path for the desired report. e.g. Facility/A_Facility_Name", "def read(self, timeout=None): '''Read all available data from the TCP/IP socket. Args: timeout", "+= f' AccessObject {AccessObjectPath}' if TimePeriod is not None: message += f' TimePeriod", "AllLines=None): message = f'ReportCreate */{ObjPath} Style \"{Style}\" Type \"Export\" File \"{FilePath}\"' if AccessObjectPath", "STK Help. Summary : str Valid values: Include Only Summary data is not", "this to True. connect_attempts : int (default: 5) The maximum number of attempts", "have the summary included with the rest of the report; use the Only", "return (self.host, self.port) def connect(self): '''Connect to the STK Connect socket specified. 
Args:", "attempts = self.send_attempts attempt = 0 while True: attempt += 1 try: self._send(message)", "must also specify at least one AccessObject. Or use the Intervals option to", "class attribute ack=True Args: None Returns: None ''' pass @abstractmethod def get_single_message(self): pass", "included with the rest of the report; use the Only value to have", "get_single_message(self): header = self.socket.recv(40).decode() cmd_name, length = header.rstrip().split() length = int(length) data =", "entered in seconds and must be between 0.000001 and 1000000000.0 seconds. Or enter", "'''Create a report in STK and return them via socket. Args: ObjPath :", "initializing by the time this is called. send_attempts : int (default: 1) Sets", "to use the array times as time steps. For information about \"<TimeArraySpec>\" see", "interval file for the time period or an Interval or Interval List component", "self.raw = bytestring def __repr__(self): return f'<{self.raw}>' @property def sync(self): '''str : The", "period for this specific call to .read() before assuming all data was received.", "be used to associate the correct responses with each other if commands are", "str( kwargs.get('host', 'localhost') ) self.port = int( kwargs.get('port', 5001) ) self.ack = bool(", "3600 seconds. If 0 is entered then the default time step (usually 60", "'' # for msg in messages: # report += # return .join() #", "jolsten \"\"\" import sys, logging import socket import time from abc import ABCMeta,", "Interval List in STK Help. For information about \"<IntervalOrListSpec>\" see Component Specification. See", "abc import ABCMeta, abstractmethod from .exceptions import * from .utils import STK_DATEFMT, inherit_docstrings", "the STK Help. 
Summary : str Valid values: Include Only Summary data is", "attempt = 0 while True: attempt += 1 try: self.socket.connect(self.address) except ConnectionRefusedError as", "value to have the summary included with the rest of the report; use", "minor_version(self): '''int : The minor version number.''' return int(self.raw[6].decode()) @property def type_length(self): '''int", "Styles require additional or pre-data, such as a comparison object for the RIC", "def get_single_message(self): msg = self.socket.recv(42).decode() hdr = AsyncHeader(msg) pdl = hdr.data_length data =", "Summary data is not generally included. Use this option, to have the summary", "packets in the current identifier.''' return int(self.raw[30:34]) @property def packet_number(self): '''int : The", "to STK via socket on {self.host}:{self.port}') time.sleep( 3 ) def send(self, message, attempts=None):", "hdr.async_type == 'NACK': raise STKNackError(f'NACK Received: stk.send(\"{message}\")') def get_single_message(self): msg = self.socket.recv(42).decode() hdr", "STK interval file, see Create & Import External Files - Interval List in", "the parent object's time period). 
Valid values: UseAccessTimes {TimeInterval} Intervals {\"<FilePath>\" | \"<IntervalOrListSpec>\"}", "help on creating the STK interval file, see Create & Import External Files", "data = self.socket.recv( pdl ).decode() while len(data) < hdr.data_length: data += self.socket.recv( pdl", "coding: utf-8 -*- \"\"\" Created on Tue Aug 4 20:13:37 2020 @author: jolsten", "1) Sets the default maximum number of attempts to make while calling .send()", "range(int(data)): sm = self.get_single_message() if len(sm) > 0: messages.append(sm) return messages @inherit_docstrings def", "f' AllLines {AllLines}' self.send(message) @inherit_docstrings def report_rm(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None,", "this method directly, as it is called from .send() if the class attribute", "give STK a moment to start self._connect() if type(self) == AsyncConnect: self.send(f'ConControl /", "Bound <Value> Array \"<TimeArraySpec>\" Enter the time step <Value> to be used in", "STK via socket on {self.host}:{self.port}') time.sleep( 3 ) def send(self, message, attempts=None): '''Sends", "__repr__(self): return f'{type(self).__name__}({self.host}:{self.port})' def __del__(self): self.close() @abstractmethod def get_ack(self, message): '''Block until an", "Returns: bytes : a bytes object containing the data received from the socket", "of STK is running. port : int The port on which the desired", "/ AsyncOff') if self.ack is not True: self.send(f'ConControl / AckOff') def _connect(self): attempt", "details on these options. TimeStep : float or str (default: None) The timestep", "# for msg in messages: # report += # return .join() # buffer", "int( kwargs.get('port', 5001) ) self.ack = bool( kwargs.get('ack', True) ) self.connect_attempts = int(", "these options. 
TimeStep : float or str The timestep to use for the", "AsyncConnect) Args: host : str (default: 'localhost') port : int (default: 5001) ack", "STKNackError : If too many NACK responses were received from STK. Examples: s.send(\"Unload", "try: buffer += self.socket.recv(4096) except socket.timeout: logging.debug('Timeout reached, returning buffer') self.socket.settimeout(None) return buffer", "read() function returns. ''' self._kwargs = kwargs self.host = str( kwargs.get('host', 'localhost') )", "(default: None) The time period to use for the report. If None, then", "at connecting to the socket. send_attempts : int Sets the default maximum number", "time for the report span. For valid {TimeInterval} values see Time Options. Or", "the current identifier.''' return int(self.raw[30:34]) @property def packet_number(self): '''int : The sequence number", "= self.socket.recv( pdl ).decode() while len(data) < hdr.data_length: data += self.socket.recv( pdl -", "< hdr.data_length: data += self.socket.recv( pdl - len(data) ).decode() return hdr, data def", "Returns: None ''' pass class Connect(_AbstractConnect): @inherit_docstrings def get_ack(self, message): msg = self.socket.recv(3).decode()", "Tue Aug 4 20:13:37 2020 @author: jolsten \"\"\" import sys, logging import socket", "send the command if a NACK is received. Returns: None Raises: STKNackError :", ">= self.connect_attempts: raise STKConnectError(f'Failed to connect to STK via socket on {self.host}:{self.port}') time.sleep(", "a new object using the raw values, passed as bytes or str.''' if", "socket import time from abc import ABCMeta, abstractmethod from .exceptions import * from", "# continue loop if any exception caught if attempt >= self.connect_attempts: raise STKConnectError(f'Failed", "option to specify an STK interval file for the time period or an", "Component Specification. See STK Help for more details on these options. 
TimeStep :", "not None: message += f' Summary {Summary}' if AllLines is not None: message", "<Value> Bound <Value> Array \"<TimeArraySpec>\" Enter the time step <Value> to be used", "Users should not typically need to use this method directly, as it is", "report. If None, then use the default (typically the parent object's time period).", "use the array times as time steps. For information about \"<TimeArraySpec>\" see Component", "TimeStep is not None: message += f' TimeStep {TimeStep}' if AdditionalData is not", "sm = self.get_single_message() if len(sm) > 0: messages.append(sm) return messages @inherit_docstrings def report(self,", "receive all the data. However, this also adds a mandatory minimum delay before", "no exceptions caught logging.info(f'Connected to STK on {self.host}:{self.port}') return True finally: # continue", "header_length(self): '''int : The header_length, should always be 42.''' return int(self.raw[3:5].decode()) @property def", "Summary : str Valid values: Include Only Summary data is not generally included.", "attempt >= self.connect_attempts: raise STKConnectError(f'Failed to connect to STK via socket on {self.host}:{self.port}')", "def get_multi_message(self): hdr, data = self.get_single_message() messages = [] for i in range(int(data)):", "'localhost') port : int (default: 5001) ack : bool (default: True) Specifies whether", "Args: None Returns: None Raises: STKConnectError : If, after .connect_attempts attempts, a connection", "data included in the exported report file. Valid values: Include Only Specify the", "value of the command type string.''' return (self.raw[9:24])[0:self.type_length] @property def identifier(self): '''int :", "hdr, data = self.get_single_message() if hdr.async_type == 'ACK': return True elif hdr.async_type ==", "STK Connect socket. Args: None Returns: None ''' try: self.socket.close() except: pass def", "include this option. 
More information on styles that require AdditionalData can be found", "kwargs.get('host', 'localhost') ) self.port = int( kwargs.get('port', 5001) ) self.ack = bool( kwargs.get('ack',", "in buffer.split('AGI421009REPORT_RM ')[1:] ] class AsyncHeader(): '''A helper class to read the STK", "(default: 5001) ack : bool (default: True) Specifies whether or not to use", "the summary data reported. Returns: None ''' pass class Connect(_AbstractConnect): @inherit_docstrings def get_ack(self,", ").decode() return hdr, data def get_multi_message(self): logging.debug('Getting Message Block:') hdr, data = self.get_single_message()", "float or str The timestep to use for the report. If None, then", ": Some Report Styles require additional or pre-data, such as a comparison object", "data. However, this also adds a mandatory minimum delay before the read() function", "int(self.raw[30:34]) @property def packet_number(self): '''int : The sequence number of the current packet", "AsyncHeader(msg) pdl = hdr.data_length data = self.socket.recv( pdl ).decode() while len(data) < hdr.data_length:", "is called from .send() if the class attribute ack=True Args: None Returns: None", "int or float (default: 1.0) Sets the default timeout period for calls to", "time from abc import ABCMeta, abstractmethod from .exceptions import * from .utils import", "boolean representing whether the instance is using ACK/NACK. Changing this after .connect() is", "@property def major_version(self): '''int : The major version number.''' return int(self.raw[5].decode()) @property def", "from .send() if the class attribute ack=True Args: None Returns: None ''' pass", "desired instance is accepting connections. address : tuple The address as a tuple", "received from the socket ''' timeout = timeout if timeout is None: timeout", "Changing this after .connect() is called will not change the mode. 
connect_attempts :", "self.socket.recv( pdl ).decode() while len(data) < hdr.data_length: data += self.socket.recv( pdl - len(data)", "time period or an Interval or Interval List component specification. For help on", "connecting to the socket. send_attempts : int Sets the default maximum number of", "hasn't finished initializing by the time this is called. send_attempts : int (default:", "object containing the data received from the socket ''' timeout = timeout if", "messages: # report += # return .join() # buffer = self.read(**kwargs).decode() # if", "timestep to use for the report. If None, then use the default (typically", "f'{type(self).__name__}({self.host}:{self.port})' def __del__(self): self.close() @abstractmethod def get_ack(self, message): '''Block until an ACK is", "file to which the report should be written. TimePeriod : str or None", "isinstance(bytestring, bytes): bytestring = bytestring.decode() self.raw = bytestring def __repr__(self): return f'<{self.raw}>' @property", "self.port = int( kwargs.get('port', 5001) ) self.ack = bool( kwargs.get('ack', True) ) self.connect_attempts", "keyword with a Time Array component specification to use the array times as", "data is not generally included. 
Use this option, to have the summary data", "TimePeriod is not None: message += f' TimePeriod {TimePeriod}' if TimeStep is not", "ConnectionRefusedError as e: logging.debug(f'ConnectionRefusedError: {e}') else: # exit loop if no exceptions caught", "messages = self.get_multi_message() return [x[1] for x in messages] # report = ''", "logging.debug(f'stk.send(\"{message}\")') self.socket.send( (message+'\\n').encode() ) def read(self, timeout=None): '''Read all available data from the", "until no data is left in the socket...') buffer = b'' while True:", "The value of the command type string.''' return (self.raw[9:24])[0:self.type_length] @property def identifier(self): '''int", "= self.socket.recv(3).decode() if msg == 'ACK': # logging.debug('ACK Received') return elif msg ==", "= '' # for msg in messages: # report += # return .join()", "be used in creating the report. This value is entered in seconds and", "e.g. Facility/A_Facility_Name Satellite/A_Satellite_Name Style : str or path-like object (required) The Style name,", "Summary : str or None (default: None) Summary data is not generally included.", "ack=True Args: None Returns: None ''' pass @abstractmethod def get_single_message(self): pass @abstractmethod def", "def get_ack(self, message): msg = self.socket.recv(3).decode() if msg == 'ACK': # logging.debug('ACK Received')", "{AccessObjectPath}' if TimePeriod is not None: message += f' TimePeriod {TimePeriod}' if TimeStep", "f'<{self.raw}>' @property def sync(self): '''str : The sync word, should always be \"AGI\"'''", "float Sets the default timeout period for calls to .read() before assuming all", "length of the command type string.''' return int(self.raw[7:9]) @property def async_type(self): '''str :", "Args: None Returns: None ''' try: self.socket.close() except: pass def __repr__(self): return f'{type(self).__name__}({self.host}:{self.port})'", "send_attempts : int Sets the default maximum number of attempts to make while", "self.get_single_message() 
messages = [] for i in range(int(data)): sm = self.get_single_message() if len(sm)", "only report data during access times between the <ObjectPath> and an AccessObject, but", "Returns: None ''' pass @abstractmethod def get_single_message(self): pass @abstractmethod def get_multi_message(self): pass @abstractmethod", "data from the TCP/IP socket. Args: timeout : int or None (default: None)", "import * from .utils import STK_DATEFMT, inherit_docstrings class _AbstractConnect(metaclass=ABCMeta): '''An STK Connect connection", "bytes): bytestring = bytestring.decode() self.raw = bytestring def __repr__(self): return f'<{self.raw}>' @property def", "timeout = timeout if timeout is None: timeout = self.timeout self.socket.setblocking(False) self.socket.settimeout(timeout) logging.debug('Reading", "component specification. For help on creating the STK interval file, see Create &", "an Interval or Interval List component specification. For help on creating the STK", "Examples: s.send(\"Unload / *\") ''' if attempts is None: attempts = self.send_attempts attempt", "for the RIC report for a Satellite. For these types of reports you", "ack : bool (default: True) Specifies whether or not to use ACK/NACK responses", "is not None: message += f' AllLines {AllLines}' self.send(message) messages = self.get_multi_message() return", "if it is already loaded into STK (or is a default report style).", "If too many NACK responses were received from STK. Examples: s.send(\"Unload / *\")", "len(buffer) == 0: return [] # logging.debug(f'Report_RM Returned: {buffer}') # return [] class", "Returns: None ''' pass @abstractmethod def report_rm(self, **kwargs): '''Create a report in STK", "Raises: STKConnectError : If, after .connect_attempts attempts, a connection couldn't be made successfully.'", "change the mode. connect_attempts : int The maximum number of attempts at connecting", "be made successfully.' 
''' self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) time.sleep(3) # give STK a", "used in creating the report. This value is entered in seconds and must", "the parent object's timestep). Valid values: <Value> Bound <Value> Array \"<TimeArraySpec>\" Enter the", "attempts to make while calling .send() before raising STKNackError. timeout : int or", "delay before the read() function returns. Returns: bytes : a bytes object containing", "require AdditionalData can be found at \"Report Additional Data\" in the STK Help.", "elif hdr.async_type == 'NACK': raise STKNackError(f'NACK Received: stk.send(\"{message}\")') def get_single_message(self): msg = self.socket.recv(42).decode()", "be between 0 and 3600 seconds. If 0 is entered then the default", "int (default: 5001) ack : bool (default: True) Specifies whether or not to", "if Summary is not None: message += f' Summary {Summary}' if AllLines is", "the Array keyword with a Time Array component specification to use the array", "type string.''' return (self.raw[9:24])[0:self.type_length] @property def identifier(self): '''int : The value of the", "see Time Options. Or specify UseAccessTimes to only report data during access times", "f' AccessObject {AccessObjectPath}' if TimePeriod is not None: message += f' TimePeriod {TimePeriod}'", "ABCMeta, abstractmethod from .exceptions import * from .utils import STK_DATEFMT, inherit_docstrings class _AbstractConnect(metaclass=ABCMeta):", "The version in major.minor format.''' return f'{self.major_version}.{self.minor_version}' @property def major_version(self): '''int : The", "use the default (typically the parent object's time period). 
Valid values: UseAccessTimes {TimeInterval}", "send_attempts : int (default: 1) Sets the default maximum number of attempts to", "* from .utils import STK_DATEFMT, inherit_docstrings class _AbstractConnect(metaclass=ABCMeta): '''An STK Connect connection class.", "hdr, data = self.get_single_message() logging.debug(f'GotMessage: {hdr}{data}') msg_grp = [None] * hdr.total_packets msg_grp[hdr.packet_number-1] =", "Args: ObjPath : str (required) The STK Object Path for the desired report.", "or path-like object (required) The Style name, if it is already loaded into", "in case the instance of STK hasn't finished initializing by the time this", "@property def packet_number(self): '''int : The sequence number of the current packet for", "External Files - Interval List in STK Help. For information about \"<IntervalOrListSpec>\" see", "message += f' TimeStep {TimeStep}' if AdditionalData is not None: message += f'", "5) The maximum number of attempts at connecting to the socket. Several attempts", "is None: timeout = self.timeout self.socket.setblocking(False) self.socket.settimeout(timeout) logging.debug('Reading until no data is left", ": a bytes object containing the data received from the socket ''' timeout", "of reports you must include this option. More information on styles that require", "Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None, **kwargs): message = f'Report_RM */{ObjPath} Style", "import STK_DATEFMT, inherit_docstrings class _AbstractConnect(metaclass=ABCMeta): '''An STK Connect connection class. Attributes: host :", "Valid values: Include Only Specify the Include value to have the summary included", "timeout : int or None (default: None) Sets the timeout period for this", "from the socket ''' timeout = timeout if timeout is None: timeout =", "@abstractmethod def report(self, **kwargs): '''Create a report in STK and save it to", "then the default time step (usually 60 seconds) is used. 
Or enter the", "''' self._kwargs = kwargs self.host = str( kwargs.get('host', 'localhost') ) self.port = int(", ": int or None (default: None) Sets the timeout period for this specific", "command type string.''' return (self.raw[9:24])[0:self.type_length] @property def identifier(self): '''int : The value of", "is received from STK Connect. Users should not typically need to use this", "def __repr__(self): return f'<{self.raw}>' @property def sync(self): '''str : The sync word, should", "enter the Array keyword with a Time Array component specification to use the", "(self.host, self.port) def connect(self): '''Connect to the STK Connect socket specified. Args: None", "the desired instance is accepting connections. address : tuple The address as a", "while True: attempt += 1 try: self._send(message) if self.ack: self.get_ack(message) return except STKNackError", "message): '''Block until an ACK is received from STK Connect. Users should not", "value to have only the summary data reported. Returns: None ''' pass @abstractmethod", "int(self.raw[7:9]) @property def async_type(self): '''str : The value of the command type string.'''", "will not change the mode. connect_attempts : int The maximum number of attempts", "Returns: tuple : (host, port) ''' return (self.host, self.port) def connect(self): '''Connect to", "not None: message += f' AllLines {AllLines}' self.send(message) messages = self.get_multi_message() return [x[1]", "leave this to True. connect_attempts : int (default: 5) The maximum number of", "the report steps calculated on a specific time boundary. 
This value is entered", "msg = msg + k raise STKNackError(f'NACK Received: stk.send(\"{message.rstrip()}\")') else: logging.error(f'Expecting ACK or", "def async_type(self): '''str : The value of the command type string.''' return (self.raw[9:24])[0:self.type_length]", "@inherit_docstrings def report(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None): message =", "via socket. Args: ObjPath : str (required) The STK Object Path for the", "Files - Interval List in STK Help. For information about \"<IntervalOrListSpec>\" see Component", "Satellite. For these types of reports you must include this option. More information", "STK and return them via socket. Args: ObjPath : str (required) The STK", "def send(self, message, attempts=None): '''Sends a Connect command via socket. Args: message: A", "to make while calling .send() before raising STKNackError. timeout : float Sets the", "the TCP/IP socket. Args: timeout : int or None (default: None) Sets the", "this specific call to .read() before assuming all data was received. Because network", "default (typically the parent object's time period). Valid values: UseAccessTimes {TimeInterval} Intervals {\"<FilePath>\"", "while True: attempt += 1 try: self.socket.connect(self.address) except ConnectionRefusedError as e: logging.debug(f'ConnectionRefusedError: {e}')", "{e}') else: # exit loop if no exceptions caught logging.info(f'Connected to STK on", "''' self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) time.sleep(3) # give STK a moment to start", "is not generally included. 
Use this option, to have the summary data included", "**kwargs): '''Inits an STK connection object (Connect or AsyncConnect) Args: host : str", ".send() if the class attribute ack=True Args: None Returns: None ''' pass @abstractmethod", "data = self.socket.recv(length).decode() return header, data def get_multi_message(self): hdr, data = self.get_single_message() messages", "kwargs.get('send_attempts', 1) ) self.timeout = float( kwargs.get('timeout', 1 ) ) self.socket = None", "pass a path to the desired .RST file. TimePeriod : str or None", "__del__(self): self.close() @abstractmethod def get_ack(self, message): '''Block until an ACK is received from", "and must be between 0 and 3600 seconds. If 0 is entered then", "= self.get_multi_message() return [x[1] for x in messages] # report = '' #", "f'ReportCreate */{ObjPath} Style \"{Style}\" Type \"Export\" File \"{FilePath}\"' if AccessObjectPath is not None:", "return f'{type(self).__name__}({self.host}:{self.port})' def __del__(self): self.close() @abstractmethod def get_ack(self, message): '''Block until an ACK", "@abstractmethod def get_multi_message(self): pass @abstractmethod def report(self, **kwargs): '''Create a report in STK", "The total number of packets in the current identifier.''' return int(self.raw[30:34]) @property def", "''' pass @abstractmethod def report_rm(self, **kwargs): '''Create a report in STK and return", "STK Connect command attempts: Optional; The maximum number of times to send the", "from .utils import STK_DATEFMT, inherit_docstrings class _AbstractConnect(metaclass=ABCMeta): '''An STK Connect connection class. 
Attributes:", "@inherit_docstrings def get_ack(self, message): hdr, data = self.get_single_message() if hdr.async_type == 'ACK': return", "__repr__(self): return f'<{self.raw}>' @property def sync(self): '''str : The sync word, should always", "for i in range(1,hdr.total_packets): hdr, data = self.get_message() logging.debug(f'GotMessage: {hdr}{data}') msg_grp[hdr.packet_number-1] = data", "the command type string.''' return (self.raw[9:24])[0:self.type_length] @property def identifier(self): '''int : The value", "AdditionalData : Some Report Styles require additional or pre-data, such as a comparison", "0 while True: attempt += 1 try: self._send(message) if self.ack: self.get_ack(message) return except", "Or specify UseAccessTimes to only report data during access times between the <ObjectPath>", "attempt += 1 try: self._send(message) if self.ack: self.get_ack(message) return except STKNackError as e:", "str The timestep to use for the report. If None, then use the", "path to the desired .RST file. TimePeriod : str or None (default: None)", "[] # logging.debug(f'Report_RM Returned: {buffer}') # return [] class AsyncConnect(_AbstractConnect): @inherit_docstrings def get_ack(self,", "stk.send(\"{message.rstrip()}\")') else: logging.error(f'Expecting ACK or NACK, got: {msg}{self.socket.recv(2048)}') sys.exit(1) def get_single_message(self): header =", "with STK Connect. Highly recommended to leave this to True. connect_attempts : int", "NACK too many times') raise STKNackError(e) def _send(self, message: str): logging.debug(f'stk.send(\"{message}\")') self.socket.send( (message+'\\n').encode()", "timeout period for calls to .read() before assuming all data was received. 
Because", "the STK interval file, see Create & Import External Files - Interval List", "int(self.raw[34:38]) @property def data_length(self): '''int : The length of the data field for", "logging.debug(f'Report_RM Returned: {buffer}') # return [] class AsyncConnect(_AbstractConnect): @inherit_docstrings def get_ack(self, message): hdr,", "Summary=None, AllLines=None, **kwargs): message = f'Report_RM */{ObjPath} Style \"{Style}\"' if AccessObjectPath is not", "must be between 0.000001 and 1000000000.0 seconds. Or enter Bound <Value> to have", "reached, returning buffer') self.socket.settimeout(None) return buffer def disconnect(self): '''Alias of .close()''' self.close() def", "bytes : a bytes object containing the data received from the socket '''", "specific time boundary. This value is entered in seconds and must be between", "the time this is called. send_attempts : int (default: 1) Sets the default", "{buffer}') # return [] class AsyncConnect(_AbstractConnect): @inherit_docstrings def get_ack(self, message): hdr, data =", "\"Report Additional Data\" in the STK Help. Summary : str or None (default:", "called will not change the mode. connect_attempts : int The maximum number of", "read the STK Connect Asynchronous Message Format headers.''' def __init__(self, bytestring): '''Inits a", "self.send(message) @inherit_docstrings def report_rm(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None, **kwargs):", "Specification. AdditionalData : str or None (default: None) Some Report Styles require additional", "instance is accepting connections. address : tuple The address as a tuple (host,", "logging.debug('Timeout reached, returning buffer') self.socket.settimeout(None) return buffer def disconnect(self): '''Alias of .close()''' self.close()", "all data was received. 
''' def __init__(self, **kwargs): '''Inits an STK connection object", "Summary=None, AllLines=None): message = f'ReportCreate */{ObjPath} Style \"{Style}\" Type \"Export\" File \"{FilePath}\"' if", "stk.send(\"{message}\")') def get_single_message(self): msg = self.socket.recv(42).decode() hdr = AsyncHeader(msg) pdl = hdr.data_length data", "# logging.debug('ACK Received') return elif msg == 'NAC': k = self.socket.recv(1).decode() msg =", "an STK interval file for the time period or an Interval or Interval", "or str.''' if isinstance(bytestring, bytes): bytestring = bytestring.decode() self.raw = bytestring def __repr__(self):", ".connect() is called will not change the mode. connect_attempts : int The maximum", "after .connect() is called will not change the mode. connect_attempts : int The", "'''int : The header_length, should always be 42.''' return int(self.raw[3:5].decode()) @property def version(self):", "if self.ack: self.get_ack(message) return except STKNackError as e: if attempt >= attempts: logging.error(f'send()", "(default: True) Specifies whether or not to use ACK/NACK responses with STK Connect.", "calls to .read() before assuming all data was received. ''' def __init__(self, **kwargs):", ": float or str The timestep to use for the report. If None,", "at \"Report Additional Data\" in the STK Help. Summary : str Valid values:", "as time steps. For information about \"<TimeArraySpec>\" see Component Specification. AdditionalData : str", "= hdr.data_length data = self.socket.recv( pdl ).decode() while len(data) < hdr.data_length: data +=", "if commands are being processed asynchronously. ''' return int(self.raw[24:30]) @property def total_packets(self): '''int", "msg_grp @inherit_docstrings def report(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None): message", "on these options. 
TimeStep : float or str The timestep to use for", "except socket.timeout: logging.debug('Timeout reached, returning buffer') self.socket.settimeout(None) return buffer def disconnect(self): '''Alias of", "= bytestring.decode() self.raw = bytestring def __repr__(self): return f'<{self.raw}>' @property def sync(self): '''str", "STK is running. port : int The port on which the desired instance", "(required) The Style name, if it is already loaded into STK (or is", "adds a mandatory minimum delay before the read() function returns. Returns: bytes :", "str or path-like object (required) The path to the file to which the", "as it is called from .send() if the class attribute ack=True Args: None", "(default: 'localhost') port : int (default: 5001) ack : bool (default: True) Specifies", "pass class Connect(_AbstractConnect): @inherit_docstrings def get_ack(self, message): msg = self.socket.recv(3).decode() if msg ==", "for calls to .read() before assuming all data was received. ''' def __init__(self,", "step <Value> to be used in creating the report. This value is entered", "pdl ).decode() while len(data) < hdr.data_length: data += self.socket.recv( pdl - len(data) ).decode()", "int(self.raw[24:30]) @property def total_packets(self): '''int : The total number of packets in the", "to read the STK Connect Asynchronous Message Format headers.''' def __init__(self, bytestring): '''Inits", ") self.send_attempts = int( kwargs.get('send_attempts', 1) ) self.timeout = float( kwargs.get('timeout', 1 )", "self.raw[0:3].decode() @property def header_length(self): '''int : The header_length, should always be 42.''' return", "in STK Help. For information about \"<IntervalOrListSpec>\" see Component Specification. See STK Help", "is used. 
Or enter the Array keyword with a Time Array component specification", "= self.get_single_message() if len(sm) > 0: messages.append(sm) return messages @inherit_docstrings def report(self, ObjPath,", "self.close() def close(self): '''Closes the STK Connect socket. Args: None Returns: None '''" ]
[ "import * from .PBX_Base_Phase import * class PBXResourcesBuildPhase(PBX_Base_Phase): def __init__(self, lookup_func, dictionary, project,", "from .PBXResolver import * from .PBX_Base_Phase import * class PBXResourcesBuildPhase(PBX_Base_Phase): def __init__(self, lookup_func,", "__init__(self, lookup_func, dictionary, project, identifier): super(PBXResourcesBuildPhase, self).__init__(lookup_func, dictionary, project, identifier); self.bundleid = 'com.apple.buildphase.resources';", "* from .PBX_Base_Phase import * class PBXResourcesBuildPhase(PBX_Base_Phase): def __init__(self, lookup_func, dictionary, project, identifier):", "PBXResourcesBuildPhase(PBX_Base_Phase): def __init__(self, lookup_func, dictionary, project, identifier): super(PBXResourcesBuildPhase, self).__init__(lookup_func, dictionary, project, identifier); self.bundleid", ".PBXResolver import * from .PBX_Base_Phase import * class PBXResourcesBuildPhase(PBX_Base_Phase): def __init__(self, lookup_func, dictionary,", "* class PBXResourcesBuildPhase(PBX_Base_Phase): def __init__(self, lookup_func, dictionary, project, identifier): super(PBXResourcesBuildPhase, self).__init__(lookup_func, dictionary, project,", "class PBXResourcesBuildPhase(PBX_Base_Phase): def __init__(self, lookup_func, dictionary, project, identifier): super(PBXResourcesBuildPhase, self).__init__(lookup_func, dictionary, project, identifier);", "from .PBX_Base_Phase import * class PBXResourcesBuildPhase(PBX_Base_Phase): def __init__(self, lookup_func, dictionary, project, identifier): super(PBXResourcesBuildPhase,", "import * class PBXResourcesBuildPhase(PBX_Base_Phase): def __init__(self, lookup_func, dictionary, project, identifier): super(PBXResourcesBuildPhase, self).__init__(lookup_func, dictionary,", "project, identifier): super(PBXResourcesBuildPhase, self).__init__(lookup_func, dictionary, project, identifier); self.bundleid = 'com.apple.buildphase.resources'; self.phase_type = 'Copy", "dictionary, project, identifier): 
super(PBXResourcesBuildPhase, self).__init__(lookup_func, dictionary, project, identifier); self.bundleid = 'com.apple.buildphase.resources'; self.phase_type =", "identifier): super(PBXResourcesBuildPhase, self).__init__(lookup_func, dictionary, project, identifier); self.bundleid = 'com.apple.buildphase.resources'; self.phase_type = 'Copy Resources';", "def __init__(self, lookup_func, dictionary, project, identifier): super(PBXResourcesBuildPhase, self).__init__(lookup_func, dictionary, project, identifier); self.bundleid =", "lookup_func, dictionary, project, identifier): super(PBXResourcesBuildPhase, self).__init__(lookup_func, dictionary, project, identifier); self.bundleid = 'com.apple.buildphase.resources'; self.phase_type", ".PBX_Base_Phase import * class PBXResourcesBuildPhase(PBX_Base_Phase): def __init__(self, lookup_func, dictionary, project, identifier): super(PBXResourcesBuildPhase, self).__init__(lookup_func," ]
[ "network_interface_id): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) return self._network_interface_info(vif_ref) def network_interface_update(self, guest_id,", "self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) return self._network_interface_info(vif_ref) def network_interface_update(self, guest_id, network_interface_id, data): vm_ref", "devices.append(int(self.connection.xenapi.VBD.get_userdevice(vbd))) next_device = max(devices) + 1 for device in range(next_device): if device not", "not snapshot_name: snapshot_name = str(datetime.datetime.now()) snap = self.connection.xenapi.VM.snapshot( self._vm_ref(guest_id), snapshot_name ) return self._snapshot_info(snap)", "Unless required by applicable law or agreed to in writing, software # distributed", "* 1024 * 1024 * 1024), \"type\": \"system\", \"sharable\": False, \"read_only\": False, \"other_config\":", "\"\", \"qos_algorithm_params\": {} } if \"network\" in data: vif_record[\"network\"] = self._network_ref(data[\"network\"]) vif_ref =", "h[\"uuid\"]}) return hosts def host_info(self, host_id): host_ref = self.connection.xenapi.host.get_by_uuid(host_id) return self._host_info(host_ref) def storage_list(self):", "self.connection.xenapi.VM.add_tags(vm_ref, tag_name) return self.tag_list(guest_id) def tag_delete(self, guest_id, tag_name): vm_ref = self._vm_ref(guest_id) self.connection.xenapi.VM.remove_tags(vm_ref, tag_name)", "= str(disk_id) for disk in self.get_disks(vm_ref): if disk[\"userdevice\"] == disk_id: return disk entity_info", "= str(int(memory[\"memory_static_max\"])<<20) self.connection.xenapi.VM.set_memory_limits( vm_ref, memory_static_min, memory_static_max, memory_target, memory_target ) if \"memory_target_live\" in guestdata:", "conn.request(\"GET\", path) response = conn.getresponse() response_size = response.getheader(\"Content-Length\") return (response, response_size) def disk_list(self,", "vm_ref, 
memory_target, memory_target ) if \"cpus\" in guestdata: vcpus = guestdata[\"cpus\"] if not", "int(sr['virtual_allocation']) / (1024 * 1024 * 1024), int(sr['physical_size']) / (1024 * 1024 *", "\"vcpus_at_startup\" : vcpus, \"vcpus_max\" : self.connection.xenapi.VM.get_VCPUs_max(vm_ref) } vcpus_at_startup = str(vcpus[\"vcpus_at_startup\"]) vcpus_max = str(vcpus[\"vcpus_max\"])", "else: self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_max) self.connection.xenapi.VM.set_VCPUs_at_startup(vm_ref, vcpus_at_startup) if \"vcpus_number_live\" in guestdata: self.connection.xenapi.VM.set_VCPUs_number_live(vm_ref, str(guestdata[\"vcpus_number_live\"])) if \"vcpu_settings\"", "= False self.poolinfo = poolinfo self.format_for = Formatter() self.connect() def connect(self): self.connection =", "= self.poolinfo.get(\"api_server\") task_ref = self.connection.xenapi.task.create( \"export vm %s\" % guest_id, \"export job\" )", "\"OpaqueRef:NULL\": guest_metrics = self.connection.xenapi.VM_guest_metrics.\\ get_record(vm[\"guest_metrics\"]) tools_up_to_date = guest_metrics[\"PV_drivers_up_to_date\"] if \"0/ip\" in guest_metrics[\"networks\"].keys(): ip", "if \"ha_enabled\" in guestdata: if guestdata[\"ha_enabled\"]: self.connection.xenapi.VM.set_ha_restart_priority( vm_ref, \"best-effort\" ) else: self.connection.xenapi.VM.set_ha_restart_priority(vm_ref, \"\")", "__init__(self, poolinfo): self.connection = False self.poolinfo = poolinfo self.format_for = Formatter() self.connect() def", "in self.connection.xenapi.SR.get_all_records().values(): if sr[\"PBDs\"] is not None and len(sr[\"PBDs\"]) > 0: storages.append({'id': sr[\"uuid\"]})", ") self.connection.xenapi.VDI.set_name_description( disk_rec[\"ref\"], data[\"name\"] ) if \"size\" in data: new_disk_size = int(data[\"size\"]) new_disk_size", "self.poolinfo.get(\"api_server\") task_ref = self.connection.xenapi.task.create( \"export vm %s\" % guest_id, \"export job\" ) path", "* 1024 new_disk_size = hdd - disks_size + 
int(disk[\"virtual_size\"]) self.connection.xenapi.VDI.resize(disk[\"ref\"], str(new_disk_size)) return self._vm_info(self._vm_ref(guest_id))", "# If host is slave, connect to master if 'HOST_IS_SLAVE' in str(error): self.poolinfo[\"api_server\"]", "isinstance(vcpus,dict): vcpus = { \"vcpus_at_startup\" : vcpus, \"vcpus_max\" : self.connection.xenapi.VM.get_VCPUs_max(vm_ref) } vcpus_at_startup =", "self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref in vm['VBDs']: vbd = self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"] == \"CD\": return", "vm[\"guest_metrics\"] != \"OpaqueRef:NULL\": guest_metrics = self.connection.xenapi.VM_guest_metrics.\\ get_record(vm[\"guest_metrics\"]) tools_up_to_date = guest_metrics[\"PV_drivers_up_to_date\"] if \"0/ip\" in", "\"memory_target\" : memory , \"memory_static_min\" : memory, \"memory_static_max\" : memory } memory_target =", "httplib.HTTPConnection(master) conn.request( \"PUT\", path, vm_stream, {\"Content-Length\": vm_size} ) response = conn.getresponse() response.status response.read()", "\"template\" in guestdata: is_template = self.connection.xenapi.VM.get_is_a_template(vm_ref) if guestdata[\"template\"] ^ is_template: self.connection.xenapi.VM.set_is_a_template( vm_ref, guestdata[\"template\"]", "return guests def guest_info(self, guest_id): vm = self._vm_ref(guest_id) return self._vm_info(vm) def guest_shutdown(self, guest_id,", "devices = [] for vbd in self.connection.xenapi.VM.get_VBDs(vm_ref): devices.append(int(self.connection.xenapi.VBD.get_userdevice(vbd))) next_device = max(devices) + 1", "and # limitations under the License. # # @author: <NAME>, Locaweb. 
# @author:", "force=False): vm_ref = self._vm_ref(guest_id) if force: return self.connection.xenapi.VM.hard_reboot(vm_ref) else: return self.connection.xenapi.VM.clean_reboot(vm_ref) def guest_suspend(self,", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "self.get_disks(vm_ref) return [self._disk_info(d) for d in disks] def disk_create(self, guest_id, data): vm_ref =", "/ (8 * 1024) self.connection.xenapi.VIF.set_qos_algorithm_type( vif_ref, \"ratelimit\" ) self.connection.xenapi.VIF.set_qos_algorithm_params( vif_ref, {\"kbps\": str(rate)} )", "for n in vif_refs] def network_interface_create(self, guest_id, data): \"\"\" Data should contain at", "\"Halted\": \"STOPPED\", \"Suspended\": \"PAUSED\" } def __init__(self, poolinfo): self.connection = False self.poolinfo =", "= {'locking_mode': None, 'ipv4_allowed': None, 'ipv6_allowed': None} vif_rec.update(self.connection.xenapi.VIF.get_record(vif_ref)) network_rec = self.connection.xenapi.network.get_record( vif_rec[\"network\"] )", "\"locking_mode\" in data and vif_record[\"locking_mode\"] != data[\"locking_mode\"]: new_attributes[\"locking_mode\"] = data[\"locking_mode\"] if \"ipv4_allowed\" in", "guest_id, snapshot_id): snap = self._vm_ref(snapshot_id) return self._snapshot_info(snap) def snapshot_revert(self, guest_id, snapshot_id): self.connection.xenapi.VM.revert(self._vm_ref(snapshot_id)) def", "if disk[\"userdevice\"] == disk_id: return disk entity_info = \"%s - on Guest\" %", "= self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] if iso_ref == 'OpaqueRef:NULL': return {\"name\": None} else: name = self.connection.xenapi.VDI.get_record(iso_ref)[\"name_label\"]", "response = conn.getresponse() response.status response.read() except socket.error as err: if err.errno == errno.ECONNRESET:", "snapshot_name ) return self._snapshot_info(snap) def snapshot_info(self, guest_id, snapshot_id): snap = self._vm_ref(snapshot_id) return self._snapshot_info(snap)", "\"memory_static_min\" 
: memory, \"memory_static_max\" : memory } memory_target = str(int(memory[\"memory_target\"])<<20) memory_static_min = str(int(memory[\"memory_static_min\"])<<20)", "h in self.connection.xenapi.host.get_all_records().values(): hosts.append({'id': h[\"uuid\"]}) return hosts def host_info(self, host_id): host_ref = self.connection.xenapi.host.get_by_uuid(host_id)", ") def guest_clone(self, guest_id, data): vm = self.connection.xenapi.VM.clone( self._vm_ref(guest_id), data[\"name\"] ) return self._vm_info(vm)", "} vdi_rec = ({ \"name_label\": \"New Disk\", \"name_description\": \"Simplestack generated disk\", \"virtual_size\": str(data[\"size\"]", "in data: new_disk_size = int(data[\"size\"]) new_disk_size *= 1024 * 1024 * 1024 self.connection.xenapi.VDI.resize(disk_rec[\"ref\"],", "device not in devices: next_device = device break vbd_rec = { \"VM\": vm_ref,", "* 1024), vm[\"PV_args\"], tools_up_to_date, ip, self.state_translation[vm.get('power_state')], host ) ) def _disk_info(self, disk_rec): return(", "self.connection.xenapi.VM.suspend(self._vm_ref(guest_id)) def guest_resume(self, guest_id): return self.connection.xenapi.VM.resume( self._vm_ref(guest_id), False, False ) def guest_clone(self, guest_id,", "self.connection.xenapi.task.create( \"export vm %s\" % guest_id, \"export job\" ) path = \"/export?session_id=%s&task_id=%s&ref=%s\" %", "logging.getLogger('simplestack.hypervisors.xen') class Stack(SimpleStack): state_translation = { \"Running\": \"STARTED\", \"Halted\": \"STOPPED\", \"Suspended\": \"PAUSED\" }", "= self.connection.xenapi.VM.get_VIFs(vm_ref) for vif_ref in vif_refs: vif_rec = self.connection.xenapi.VIF.get_record(vif_ref) if vif_rec[\"MAC\"] == network_interface_id:", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "= str(int(memory[\"memory_target\"])<<20) memory_static_min = str(int(memory[\"memory_static_min\"])<<20) memory_static_max = str(int(memory[\"memory_static_max\"])<<20) 
self.connection.xenapi.VM.set_memory_limits( vm_ref, memory_static_min, memory_static_max, memory_target,", "int(m_rec['memory_total']) pool_rec = self.connection.xenapi.pool.get_all_records().values()[0] master_rec = self.connection.xenapi.host.get_record(pool_rec[\"master\"]) return ( self.format_for.pool( used_memory / (1024", "pass self.connection.xenapi.VIF.destroy(vif_ref) vif_ref = self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref) except: pass if \"active\" in data:", "network_rec[\"name_label\"], vif_rec[\"locking_mode\"], vif_rec[\"ipv4_allowed\"], vif_rec[\"ipv6_allowed\"], vif_rec[\"qos_algorithm_params\"] ) ) def _delete_vm(self, vm_id): vm_ref = self._vm_ref(vm_id)", "if \"ipv6_allowed\" in data and vif_record[\"ipv6_allowed\"] != data[\"ipv6_allowed\"]: new_attributes[\"ipv6_allowed\"] = data[\"ipv6_allowed\"] if len(new_attributes)", "def _disk_rec(self, vm_ref, disk_id): disk_id = str(disk_id) for disk in self.get_disks(vm_ref): if disk[\"userdevice\"]", "host_id): host_ref = self.connection.xenapi.host.get_by_uuid(host_id) return self._host_info(host_ref) def storage_list(self): storages = [] for sr", "self.poolinfo = poolinfo self.format_for = Formatter() self.connect() def connect(self): self.connection = XenAPI.Session( \"https://%s/\"", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "host_ref): host = self.connection.xenapi.host.get_record(host_ref) return( self.format_for.host( host['uuid'], host['name_label'], host['address'] ) ) def _storage_info(self,", "\"New Disk\", \"name_description\": \"Simplestack generated disk\", \"virtual_size\": str(data[\"size\"] * 1024 * 1024 *", "\"qos_algorithm_type\": \"\", \"qos_algorithm_params\": {} } vdi_rec = ({ \"name_label\": \"New Disk\", \"name_description\": \"Simplestack", "self._vm_ref(guest_id) if force: return self.connection.xenapi.VM.hard_reboot(vm_ref) else: return 
self.connection.xenapi.VM.clean_reboot(vm_ref) def guest_suspend(self, guest_id): return self.connection.xenapi.VM.suspend(self._vm_ref(guest_id))", "return self.connection.xenapi.network.get_PIFs(ref) def _network_create(self, name, description, other_config={}): return self.connection.xenapi.network.create({\"name_label\": name, \"name_description\": description, \"other_config\":", "in data: net_refs = self._network_ref(data[\"network\"]) if vif_record[\"network\"] != net_refs: new_attributes[\"network\"] = net_refs if", "media_data.get(\"name\") and media_data[\"name\"] != \"\": self.media_unmount(guest_id) iso_ref = self.connection.xenapi.VDI.get_by_name_label( media_data[\"name\"] )[0] self.connection.xenapi.VBD.insert(cd_ref, iso_ref)", "vm.get('uuid'), vm.get('name_label'), int(vm.get('VCPUs_at_startup')), int(vm.get('memory_static_max')) / (1024 * 1024), self.get_disks_size(vm_ref) / (1024 * 1024", "vif_rec[\"qos_algorithm_params\"] ) ) def _delete_vm(self, vm_id): vm_ref = self._vm_ref(vm_id) if not vm_ref: return", "= 0 for vdi in self.get_disks(vm_ref): size += int(vdi[\"virtual_size\"]) return size def _disk_rec(self,", "= self._disk_rec(vm_ref, next_device) return self._disk_info(disk_rec) def disk_info(self, guest_id, disk_id): vm_ref = self._vm_ref(guest_id) disk_rec", "EntityNotFound(\"Disk\", entity_info) def _network_interface_ref(self, vm_ref, network_interface_id): vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref) for vif_ref in vif_refs:", "host['address'] ) ) def _storage_info(self, sr_ref): sr = self.connection.xenapi.SR.get_record(sr_ref) return( self.format_for.storage( sr['uuid'], sr['name_label'],", "\"bootable\": False, \"mode\": \"RW\", \"type\": \"Disk\", \"unpluggable\": False, \"empty\": False, \"other_config\": {}, \"qos_algorithm_type\":", "self.connection.xenapi.host.get_all(): met_ref = self.connection.xenapi.host.get_metrics(host_ref) m_rec = self.connection.xenapi.host_metrics.get_record(met_ref) total_memory += 
int(m_rec['memory_total']) pool_rec = self.connection.xenapi.pool.get_all_records().values()[0]", "def guest_reboot(self, guest_id, force=False): vm_ref = self._vm_ref(guest_id) if force: return self.connection.xenapi.VM.hard_reboot(vm_ref) else: return", "in guestdata: self.connection.xenapi.VM.set_VCPUs_number_live(vm_ref, str(guestdata[\"vcpus_number_live\"])) if \"vcpu_settings\" in guestdata: parameters = self.connection.xenapi.VM.get_VCPUs_params(vm_ref) parameters.update(guestdata[\"vcpu_settings\"]) self.connection.xenapi.VM.set_VCPUs_params(vm_ref,", "\"\", task_rec[\"result\"]) self.connection.xenapi.task.destroy(task_ref) return self._vm_info(vm_ref) def guest_export(self, guest_id): vm_ref = self._vm_ref(guest_id) session_ref =", "% guest_id, \"export job\" ) path = \"/export?session_id=%s&task_id=%s&ref=%s\" % ( session_ref, task_ref, vm_ref", "except: pass return self._network_interface_info(vif_ref) def network_interface_info(self, guest_id, network_interface_id): vm_ref = self._vm_ref(guest_id) vif_ref =", "data[\"locking_mode\"]: new_attributes[\"locking_mode\"] = data[\"locking_mode\"] if \"ipv4_allowed\" in data and vif_record[\"ipv4_allowed\"] != data[\"ipv4_allowed\"]: new_attributes[\"ipv4_allowed\"]", "\"MAC_autogenerated\": True, \"MAC\": \"\", \"MTU\": \"0\", \"other_config\": {}, \"qos_algorithm_type\": \"\", \"qos_algorithm_params\": {} }", "def disk_create(self, guest_id, data): vm_ref = self._vm_ref(guest_id) devices = [] for vbd in", "self._vm_info(self._vm_ref(guest_id)) def guest_delete(self, guest_id): self._delete_vm(guest_id) def guest_import(self, vm_stream, vm_size, storage_id=None): session_ref = self.connection._session", "vm_ref = self._vm_ref(guest_id) devices = [] for vbd in self.connection.xenapi.VM.get_VBDs(vm_ref): devices.append(int(self.connection.xenapi.VBD.get_userdevice(vbd))) next_device =", "guest_metrics[\"PV_drivers_up_to_date\"] if \"0/ip\" in guest_metrics[\"networks\"].keys(): ip = 
guest_metrics[\"networks\"][\"0/ip\"] host = None if vm[\"resident_on\"]", "% ( session_ref, task_ref, vm_ref ) conn = httplib.HTTPConnection(master) conn.request(\"GET\", path) response =", "if vm[\"resident_on\"] != \"OpaqueRef:NULL\": host = self.connection.xenapi.host.get_name_label( vm[\"resident_on\"] ) return( self.format_for.guest( vm.get('uuid'), vm.get('name_label'),", "\"Disk\": vdi = self.connection.xenapi.VDI.get_record(vbd['VDI']) vdi['userdevice'] = vbd['userdevice'] vdi['ref'] = vbd['VDI'] disks.append(vdi) return sorted(disks,", "and media_data[\"name\"] != \"\": self.media_unmount(guest_id) iso_ref = self.connection.xenapi.VDI.get_by_name_label( media_data[\"name\"] )[0] self.connection.xenapi.VBD.insert(cd_ref, iso_ref) else:", "self._storage_info(sr_ref) def guest_list(self): guests = [] for vm in self.connection.xenapi.VM.get_all_records().values(): if (not vm.get('is_a_snapshot'))", "try: self.connection.xenapi.VIF.plug(vif_ref) except: pass return self._network_interface_info(vif_ref) def network_interface_info(self, guest_id, network_interface_id): vm_ref = self._vm_ref(guest_id)", "def network_interface_update(self, guest_id, network_interface_id, data): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) vif_record", "if vif_record[\"network\"] != net_refs: new_attributes[\"network\"] = net_refs if \"locking_mode\" in data and vif_record[\"locking_mode\"]", "def tag_list(self, guest_id): return self.connection.xenapi.VM.get_tags(self._vm_ref(guest_id)) def tag_create(self, guest_id, tag_name): vm_ref = self._vm_ref(guest_id) self.connection.xenapi.VM.add_tags(vm_ref,", "cd_ref = self._cd_ref(vm_ref) iso_ref = self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] if iso_ref == 'OpaqueRef:NULL': return {\"name\": None}", "if \"cpus\" in guestdata: vcpus = guestdata[\"cpus\"] if not isinstance(vcpus,dict): vcpus = {", "devices: next_device = device break vif_record = { \"VM\": vm_ref, 
\"device\": str(next_device), \"MAC_autogenerated\":", "def network_interface_info(self, guest_id, network_interface_id): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) return self._network_interface_info(vif_ref)", "snap = self._vm_ref(snapshot_id) return self._snapshot_info(snap) def snapshot_revert(self, guest_id, snapshot_id): self.connection.xenapi.VM.revert(self._vm_ref(snapshot_id)) def snapshot_delete(self, guest_id,", "tag_delete(self, guest_id, tag_name): vm_ref = self._vm_ref(guest_id) self.connection.xenapi.VM.remove_tags(vm_ref, tag_name) def get_disks(self, vm_ref): disks =", "name): ref = self._network_ref(name) return self.connection.xenapi.network.get_PIFs(ref) def _network_create(self, name, description, other_config={}): return self.connection.xenapi.network.create({\"name_label\":", "pv_args = \"-- quiet console=hvc0\" else: pv_args = guestdata[\"paravirtualized\"] self.connection.xenapi.VM.set_HVM_boot_policy(vm_ref, \"\") self.connection.xenapi.VM.set_PV_args(vm_ref, pv_args)", "except: pass if \"ratelimit\" in data: if data[\"ratelimit\"]: # kbps in xen is", "self.connection.xenapi.VM.get_by_uuid(uuid) except: LOG.warning(\"uuid=%s action=not_found\" % uuid) return None def _host_info(self, host_ref): host =", "if len(net_ref) == 0: raise EntityNotFound(\"NetworkInterface\", \"Unknown network: %s\" % name) return net_ref[0]", "_network_interface_info(self, vif_ref): vif_rec = {'locking_mode': None, 'ipv4_allowed': None, 'ipv6_allowed': None} vif_rec.update(self.connection.xenapi.VIF.get_record(vif_ref)) network_rec =", "pass return self._network_interface_info(vif_ref) def network_interface_info(self, guest_id, network_interface_id): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref,", "vm_ref, \"userdevice\": str(next_device), \"bootable\": False, \"mode\": \"RW\", \"type\": \"Disk\", \"unpluggable\": False, \"empty\": False,", 
"self.connection.xenapi.VDI.resize(disk_rec[\"ref\"], str(new_disk_size)) disk_rec = self._disk_rec(vm_ref, disk_id) return self._disk_info(disk_rec) def media_mount(self, guest_id, media_data): vm_ref", "{}, \"tags\": [] }) if data.get(\"storage_id\"): raise FeatureNotImplemented() else: disks = self.get_disks(vm_ref) vdi_rec[\"SR\"]", "\"network\" in data: net_refs = self._network_ref(data[\"network\"]) if vif_record[\"network\"] != net_refs: new_attributes[\"network\"] = net_refs", "self.connection._session master = self.poolinfo.get(\"api_server\") storage_ref = None if storage_id: storage_ref = self.connection.xenapi.SR.get_by_uuid(storage_id) else:", ") return( self.format_for.network_interface( vif_rec[\"MAC\"], vif_rec[\"device\"], vif_rec[\"MAC\"], network_rec[\"name_label\"], vif_rec[\"locking_mode\"], vif_rec[\"ipv4_allowed\"], vif_rec[\"ipv6_allowed\"], vif_rec[\"qos_algorithm_params\"] ) )", "\"\") return self._network_interface_info(vif_ref) def network_interface_delete(self, guest_id, network_interface_id): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref,", "= net_refs if \"locking_mode\" in data and vif_record[\"locking_mode\"] != data[\"locking_mode\"]: new_attributes[\"locking_mode\"] = data[\"locking_mode\"]", "snapshot_revert(self, guest_id, snapshot_id): self.connection.xenapi.VM.revert(self._vm_ref(snapshot_id)) def snapshot_delete(self, guest_id, snapshot_id): self._delete_vm(snapshot_id) def tag_list(self, guest_id): return", "not use this file except in compliance with the License. 
# You may", "for s in self.connection.xenapi.VM.get_snapshots( self._vm_ref(guest_id) ) ] return snaps def snapshot_create(self, guest_id, snapshot_name=None):", "return( self.format_for.network_interface( vif_rec[\"MAC\"], vif_rec[\"device\"], vif_rec[\"MAC\"], network_rec[\"name_label\"], vif_rec[\"locking_mode\"], vif_rec[\"ipv4_allowed\"], vif_rec[\"ipv6_allowed\"], vif_rec[\"qos_algorithm_params\"] ) ) def", "= data[\"name\"] vdi_rec[\"name_description\"] = data[\"name\"] vdi_ref = self.connection.xenapi.VDI.create(vdi_rec) vbd_rec[\"VDI\"] = vdi_ref self.connection.xenapi.VBD.create(vbd_rec) disk_rec", "errno import socket import httplib import logging LOG = logging.getLogger('simplestack.hypervisors.xen') class Stack(SimpleStack): state_translation", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "and not vm_rec['is_a_snapshot']: used_memory += int(vm_rec['memory_dynamic_max']) total_memory = 0 for host_ref in self.connection.xenapi.host.get_all():", "self._delete_vm(snapshot_id) def tag_list(self, guest_id): return self.connection.xenapi.VM.get_tags(self._vm_ref(guest_id)) def tag_create(self, guest_id, tag_name): vm_ref = self._vm_ref(guest_id)", "self.connection.xenapi.VM.hard_reboot(vm_ref) else: return self.connection.xenapi.VM.clean_reboot(vm_ref) def guest_suspend(self, guest_id): return self.connection.xenapi.VM.suspend(self._vm_ref(guest_id)) def guest_resume(self, guest_id): return", "def host_info(self, host_id): host_ref = self.connection.xenapi.host.get_by_uuid(host_id) return self._host_info(host_ref) def storage_list(self): storages = []", "= self.connection.xenapi.VM.snapshot( self._vm_ref(guest_id), snapshot_name ) return self._snapshot_info(snap) def snapshot_info(self, guest_id, snapshot_id): snap =", "name, description, other_config={}): return self.connection.xenapi.network.create({\"name_label\": name, \"name_description\": description, \"other_config\": other_config}) def network_vlan_create(self, 
name,", "master if 'HOST_IS_SLAVE' in str(error): self.poolinfo[\"api_server\"] = str(error).split(\"'\")[3] self.connect() else: raise error def", "agreed to in writing, software # distributed under the License is distributed on", "False, False ) def guest_reboot(self, guest_id, force=False): vm_ref = self._vm_ref(guest_id) if force: return", "null_ref = 'OpaqueRef:NULL' if self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] != null_ref: self.connection.xenapi.VBD.eject(cd_ref) def media_info(self, guest_id): vm_ref =", "storage_ref = self.connection.xenapi.SR.get_by_uuid(storage_id) else: storages = self.connection.xenapi.SR.get_all_records() max_free_space = 0 for sr_ref, record", "= self._disk_rec(vm_ref, disk_id) if \"name\" in data: self.connection.xenapi.VDI.set_name_label( disk_rec[\"ref\"], data[\"name\"] ) self.connection.xenapi.VDI.set_name_description( disk_rec[\"ref\"],", "vm_ref): disks = [] vm = self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref in vm['VBDs']: vbd =", "= self.connection.xenapi.VDI.get_by_name_label( media_data[\"name\"] )[0] self.connection.xenapi.VBD.insert(cd_ref, iso_ref) else: self.media_unmount(guest_id) def media_unmount(self, guest_id): vm_ref =", "return self._disk_info(disk_rec) def media_mount(self, guest_id, media_data): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) if", "( session_ref, task_ref, storage_ref ) try: conn = httplib.HTTPConnection(master) conn.request( \"PUT\", path, vm_stream,", "1024 * 1024) ) ) def _vm_info(self, vm_ref): vm = self.connection.xenapi.VM.get_record(vm_ref) tools_up_to_date =", "[ self._snapshot_info(s) for s in self.connection.xenapi.VM.get_snapshots( self._vm_ref(guest_id) ) ] return snaps def snapshot_create(self,", "!= \"OpaqueRef:NULL\": host = self.connection.xenapi.host.get_name_label( vm[\"resident_on\"] ) return( self.format_for.guest( vm.get('uuid'), vm.get('name_label'), int(vm.get('VCPUs_at_startup')), int(vm.get('memory_static_max'))", 
"snapshot_name=None): if not snapshot_name: snapshot_name = str(datetime.datetime.now()) snap = self.connection.xenapi.VM.snapshot( self._vm_ref(guest_id), snapshot_name )", "self.connection.xenapi.host.get_by_uuid(host_id) return self._host_info(host_ref) def storage_list(self): storages = [] for sr in self.connection.xenapi.SR.get_all_records().values(): if", "conn.getresponse() response_size = response.getheader(\"Content-Length\") return (response, response_size) def disk_list(self, guest_id): vm_ref = self._vm_ref(guest_id)", "= self._cd_ref(vm_ref) iso_ref = self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] if iso_ref == 'OpaqueRef:NULL': return {\"name\": None} else:", "storage_id=None): session_ref = self.connection._session master = self.poolinfo.get(\"api_server\") storage_ref = None if storage_id: storage_ref", "Copyright 2013 Locaweb. # All Rights Reserved. # # Licensed under the Apache", "response.getheader(\"Content-Length\") return (response, response_size) def disk_list(self, guest_id): vm_ref = self._vm_ref(guest_id) disks = self.get_disks(vm_ref)", "str(disk_id) for disk in self.get_disks(vm_ref): if disk[\"userdevice\"] == disk_id: return disk entity_info =", "= poolinfo self.format_for = Formatter() self.connect() def connect(self): self.connection = XenAPI.Session( \"https://%s/\" %", "session_ref = self.connection._session # FIXME: get real master master = self.poolinfo.get(\"api_server\") task_ref =", "= ({ \"name_label\": \"New Disk\", \"name_description\": \"Simplestack generated disk\", \"virtual_size\": str(data[\"size\"] * 1024", "\"export vm %s\" % guest_id, \"export job\" ) path = \"/export?session_id=%s&task_id=%s&ref=%s\" % (", "* 1024 * 1024) ) ) def _vm_info(self, vm_ref): vm = self.connection.xenapi.VM.get_record(vm_ref) tools_up_to_date", "* 1024), total_memory / (1024 * 1024), pool_rec[\"uuid\"], master_rec[\"address\"], { 'version': master_rec.get('software_version', {}).get('product_version')", "hdd = 
guestdata.get(\"hdd\") * 1024 * 1024 * 1024 new_disk_size = hdd -", "= self.connection.xenapi.VM.get_record(snap_ref) self._delete_vm(snap[\"uuid\"]) self._delete_disks(vm_ref) self.connection.xenapi.VM.destroy(vm_ref) def _cd_ref(self, vm_ref): vm = self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref", "guestdata[\"template\"] ) if \"paravirtualized\" in guestdata: if guestdata[\"paravirtualized\"]: if guestdata[\"paravirtualized\"] is True: pv_args", "_disk_info(self, disk_rec): return( self.format_for.disk( disk_rec.get('userdevice'), disk_rec.get('name_label'), disk_rec.get('userdevice'), int(disk_rec.get('virtual_size')) / (1024 * 1024 *", "in guestdata: vcpus = guestdata[\"cpus\"] if not isinstance(vcpus,dict): vcpus = { \"vcpus_at_startup\" :", "snapshot = self.connection.xenapi.VM.get_record(snapshot_ref) return( self.format_for.snapshot( snapshot.get('uuid'), snapshot.get('name_label') ) ) def _network_interface_info(self, vif_ref): vif_rec", "master = self.poolinfo.get(\"api_server\") storage_ref = None if storage_id: storage_ref = self.connection.xenapi.SR.get_by_uuid(storage_id) else: storages", "1024) self.connection.xenapi.VIF.set_qos_algorithm_type( vif_ref, \"ratelimit\" ) self.connection.xenapi.VIF.set_qos_algorithm_params( vif_ref, {\"kbps\": str(rate)} ) else: self.connection.xenapi.VIF.set_qos_algorithm_type(vif_ref, \"\")", "other_config) pif_ref = self._network_get_pifs(from_network) ref = self.connection.xenapi.pool.create_VLAN_from_PIF(pif_ref[0], net_ref, str(vlan)) return net_ref def network_interface_list(self,", "return self.tag_list(guest_id) def tag_delete(self, guest_id, tag_name): vm_ref = self._vm_ref(guest_id) self.connection.xenapi.VM.remove_tags(vm_ref, tag_name) def get_disks(self,", "vm\", \"import job\" ) path = \"/import?session_id=%s&task_id=%s&sr_id=%s\" % ( session_ref, task_ref, storage_ref )", "storages = [] for sr in self.connection.xenapi.SR.get_all_records().values(): if sr[\"PBDs\"] is not None and", "if 
\"locking_mode\" in data and vif_record[\"locking_mode\"] != data[\"locking_mode\"]: new_attributes[\"locking_mode\"] = data[\"locking_mode\"] if \"ipv4_allowed\"", "sr_ref): sr = self.connection.xenapi.SR.get_record(sr_ref) return( self.format_for.storage( sr['uuid'], sr['name_label'], sr['type'], int(sr['physical_utilisation']) / (1024 *", "vif_record = self.connection.xenapi.VIF.get_record(vif_ref) new_attributes = {} if \"network\" in data: net_refs = self._network_ref(data[\"network\"])", "self.connection.xenapi.pool.get_all_records().values()[0] master_rec = self.connection.xenapi.host.get_record(pool_rec[\"master\"]) return ( self.format_for.pool( used_memory / (1024 * 1024), total_memory", "\"best-effort\" ) else: self.connection.xenapi.VM.set_ha_restart_priority(vm_ref, \"\") if \"template\" in guestdata: is_template = self.connection.xenapi.VM.get_is_a_template(vm_ref) if", "\"THE NETWORK NAME\"} \"\"\" vm_ref = self._vm_ref(guest_id) devices = [] for vif in", "len(new_attributes) != 0: vif_record.update(new_attributes) try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass self.connection.xenapi.VIF.destroy(vif_ref) vif_ref = self.connection.xenapi.VIF.create(vif_record) try:", "\"Running\": \"STARTED\", \"Halted\": \"STOPPED\", \"Suspended\": \"PAUSED\" } def __init__(self, poolinfo): self.connection = False", "new_disk_size *= 1024 * 1024 * 1024 self.connection.xenapi.VDI.resize(disk_rec[\"ref\"], str(new_disk_size)) disk_rec = self._disk_rec(vm_ref, disk_id)", "vm_ref = self._vm_ref(guest_id) if \"name\" in guestdata: self.connection.xenapi.VM.set_name_label(vm_ref, guestdata[\"name\"]) if \"memory\" in guestdata:", "= self.connection.xenapi.VM.clone( self._vm_ref(guest_id), data[\"name\"] ) return self._vm_info(vm) def guest_update(self, guest_id, guestdata): vm_ref =", "to in writing, software # distributed under the License is distributed on an", "except: pass else: try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass if \"ratelimit\" 
in data: if data[\"ratelimit\"]:", "implied. # See the License for the specific language governing permissions and #", "Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "self.connection._session # FIXME: get real master master = self.poolinfo.get(\"api_server\") task_ref = self.connection.xenapi.task.create( \"export", "# @author: <NAME>, Locaweb. # @author: <NAME> (morellon), Locaweb. # @author: <NAME> (PotHix),", "vif_ref = self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref) except: pass if \"active\" in data: if data[\"active\"]:", "FeatureNotImplemented, EntityNotFound from simplestack.hypervisors.base import SimpleStack from simplestack.presenters.formatter import Formatter import re import", "str(new_disk_size)) disk_rec = self._disk_rec(vm_ref, disk_id) return self._disk_info(disk_rec) def media_mount(self, guest_id, media_data): vm_ref =", "+ int(disk[\"virtual_size\"]) self.connection.xenapi.VDI.resize(disk[\"ref\"], str(new_disk_size)) return self._vm_info(self._vm_ref(guest_id)) def guest_delete(self, guest_id): self._delete_vm(guest_id) def guest_import(self, vm_stream,", "= [] for vif in self.connection.xenapi.VM.get_VIFs(vm_ref): devices.append(int(self.connection.xenapi.VIF.get_device(vif))) next_device = max(devices) + 1 for", ") path = \"/import?session_id=%s&task_id=%s&sr_id=%s\" % ( session_ref, task_ref, storage_ref ) try: conn =", "= self.connection.xenapi.VDI.get_record(iso_ref)[\"name_label\"] return {\"name\": name} def network_list(self): net_refs = self.connection.xenapi.network.get_all() ret = []", "- disks_size + int(disk[\"virtual_size\"]) self.connection.xenapi.VDI.resize(disk[\"ref\"], str(new_disk_size)) return self._vm_info(self._vm_ref(guest_id)) def guest_delete(self, guest_id): self._delete_vm(guest_id) def", "if vm[\"guest_metrics\"] != \"OpaqueRef:NULL\": guest_metrics = self.connection.xenapi.VM_guest_metrics.\\ get_record(vm[\"guest_metrics\"]) 
tools_up_to_date = guest_metrics[\"PV_drivers_up_to_date\"] if \"0/ip\"", "'ipv4_allowed': None, 'ipv6_allowed': None} vif_rec.update(self.connection.xenapi.VIF.get_record(vif_ref)) network_rec = self.connection.xenapi.network.get_record( vif_rec[\"network\"] ) return( self.format_for.network_interface( vif_rec[\"MAC\"],", "self.connection.xenapi.VM.get_record(vm_ref) tools_up_to_date = None ip = None if vm[\"guest_metrics\"] != \"OpaqueRef:NULL\": guest_metrics =", "total_memory = 0 for host_ref in self.connection.xenapi.host.get_all(): met_ref = self.connection.xenapi.host.get_metrics(host_ref) m_rec = self.connection.xenapi.host_metrics.get_record(met_ref)", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "the License. # # @author: <NAME>, Locaweb. # @author: <NAME> (morellon), Locaweb. #", "LOG = logging.getLogger('simplestack.hypervisors.xen') class Stack(SimpleStack): state_translation = { \"Running\": \"STARTED\", \"Halted\": \"STOPPED\", \"Suspended\":", "ref = self._network_ref(name) return self.connection.xenapi.network.get_PIFs(ref) def _network_create(self, name, description, other_config={}): return self.connection.xenapi.network.create({\"name_label\": name,", "@author: <NAME>, Locaweb. # @author: <NAME> (morellon), Locaweb. 
# @author: <NAME> (PotHix), Locaweb.", "in guestdata: self.connection.xenapi.VM.set_name_label(vm_ref, guestdata[\"name\"]) if \"memory\" in guestdata: memory = guestdata[\"memory\"] if not", "pv_args) else: self.connection.xenapi.VM.set_PV_args(vm_ref, \"\") self.connection.xenapi.VM.set_HVM_boot_params( vm_ref, {\"order\": \"dc\"} ) self.connection.xenapi.VM.set_HVM_boot_policy( vm_ref, \"BIOS order\"", "str(vcpus[\"vcpus_at_startup\"]) vcpus_max = str(vcpus[\"vcpus_max\"]) if int(vcpus_at_startup) > int(vcpus_max): self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_at_startup) else: self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_max)", "= self.connection.xenapi.host.get_record(pool_rec[\"master\"]) return ( self.format_for.pool( used_memory / (1024 * 1024), total_memory / (1024", "'OpaqueRef:NULL': return {\"name\": None} else: name = self.connection.xenapi.VDI.get_record(iso_ref)[\"name_label\"] return {\"name\": name} def network_list(self):", "not None and len(sr[\"PBDs\"]) > 0: storages.append({'id': sr[\"uuid\"]}) return storages def storage_info(self, storage_id):", "self.connection.xenapi.VM.snapshot( self._vm_ref(guest_id), snapshot_name ) return self._snapshot_info(snap) def snapshot_info(self, guest_id, snapshot_id): snap = self._vm_ref(snapshot_id)", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "<NAME> (PotHix), Locaweb. 
from simplestack.utils import XenAPI from simplestack.exceptions import FeatureNotImplemented, EntityNotFound from", "else: disks = self.get_disks(vm_ref) vdi_rec[\"SR\"] = disks[0][\"SR\"] if \"name\" in data: vdi_rec[\"name_label\"] =", "if \"network\" in data: net_refs = self._network_ref(data[\"network\"]) if vif_record[\"network\"] != net_refs: new_attributes[\"network\"] =", "guest_id, data): \"\"\" Data should contain at least a network key: {\"network\": \"THE", "def tag_create(self, guest_id, tag_name): vm_ref = self._vm_ref(guest_id) self.connection.xenapi.VM.add_tags(vm_ref, tag_name) return self.tag_list(guest_id) def tag_delete(self,", "disks = self.get_disks(vm_ref) return [self._disk_info(d) for d in disks] def disk_create(self, guest_id, data):", "0: raise EntityNotFound(\"NetworkInterface\", \"Unknown network: %s\" % name) return net_ref[0] def _network_get_pifs(self, name):", "- on Guest\" % (disk_id) raise EntityNotFound(\"Disk\", entity_info) def _network_interface_ref(self, vm_ref, network_interface_id): vif_refs", "self.format_for.network_interface( vif_rec[\"MAC\"], vif_rec[\"device\"], vif_rec[\"MAC\"], network_rec[\"name_label\"], vif_rec[\"locking_mode\"], vif_rec[\"ipv4_allowed\"], vif_rec[\"ipv6_allowed\"], vif_rec[\"qos_algorithm_params\"] ) ) def _delete_vm(self,", "self.connection.xenapi.VIF.plug(vif_ref) except: pass if \"active\" in data: if data[\"active\"]: try: self.connection.xenapi.VIF.plug(vif_ref) except: pass", "vdi = self.connection.xenapi.VDI.get_record(vbd['VDI']) vdi['userdevice'] = vbd['userdevice'] vdi['ref'] = vbd['VDI'] disks.append(vdi) return sorted(disks, key=lambda", "action=not_found\" % uuid) return None def _host_info(self, host_ref): host = self.connection.xenapi.host.get_record(host_ref) return( self.format_for.host(", "guest_id): self._delete_vm(guest_id) def guest_import(self, vm_stream, vm_size, storage_id=None): session_ref = self.connection._session master = self.poolinfo.get(\"api_server\")", "the 
Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "/ (1024 * 1024), self.get_disks_size(vm_ref) / (1024 * 1024 * 1024), vm[\"PV_args\"], tools_up_to_date,", "self.media_unmount(guest_id) iso_ref = self.connection.xenapi.VDI.get_by_name_label( media_data[\"name\"] )[0] self.connection.xenapi.VBD.insert(cd_ref, iso_ref) else: self.media_unmount(guest_id) def media_unmount(self, guest_id):", "self._vm_ref(guest_id) if \"name\" in guestdata: self.connection.xenapi.VM.set_name_label(vm_ref, guestdata[\"name\"]) if \"memory\" in guestdata: memory =", "str(guestdata[\"vcpus_number_live\"])) if \"vcpu_settings\" in guestdata: parameters = self.connection.xenapi.VM.get_VCPUs_params(vm_ref) parameters.update(guestdata[\"vcpu_settings\"]) self.connection.xenapi.VM.set_VCPUs_params(vm_ref, parameters) if \"ha_enabled\"", "vm_rec['is_a_template'] and not vm_rec['is_a_snapshot']: used_memory += int(vm_rec['memory_dynamic_max']) total_memory = 0 for host_ref in", "disks[0][\"SR\"] if \"name\" in data: vdi_rec[\"name_label\"] = data[\"name\"] vdi_rec[\"name_description\"] = data[\"name\"] vdi_ref =", "_host_info(self, host_ref): host = self.connection.xenapi.host.get_record(host_ref) return( self.format_for.host( host['uuid'], host['name_label'], host['address'] ) ) def", "return( self.format_for.host( host['uuid'], host['name_label'], host['address'] ) ) def _storage_info(self, sr_ref): sr = self.connection.xenapi.SR.get_record(sr_ref)", ") def _vm_info(self, vm_ref): vm = self.connection.xenapi.VM.get_record(vm_ref) tools_up_to_date = None ip = None", "for vbd_ref in vm['VBDs']: vbd = self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"] == \"Disk\": vdi =", "disk_id) return self._disk_info(disk_rec) def disk_update(self, guest_id, disk_id, data): vm_ref = self._vm_ref(guest_id) disk_rec =", "in guest_metrics[\"networks\"].keys(): ip = guest_metrics[\"networks\"][\"0/ip\"] host 
= None if vm[\"resident_on\"] != \"OpaqueRef:NULL\": host", "self.connection.xenapi.VM.clone( self._vm_ref(guest_id), data[\"name\"] ) return self._vm_info(vm) def guest_update(self, guest_id, guestdata): vm_ref = self._vm_ref(guest_id)", "vif_ref = self._network_interface_ref(vm_ref, network_interface_id) try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass self.connection.xenapi.VIF.destroy(vif_ref) def snapshot_list(self, guest_id): snaps", "\"\") self.connection.xenapi.VM.set_PV_args(vm_ref, pv_args) else: self.connection.xenapi.VM.set_PV_args(vm_ref, \"\") self.connection.xenapi.VM.set_HVM_boot_params( vm_ref, {\"order\": \"dc\"} ) self.connection.xenapi.VM.set_HVM_boot_policy( vm_ref,", "in self.connection.xenapi.VM.get_all_records().values(): if (not vm.get('is_a_snapshot')) and (not vm.get('is_a_template')): guests.append({'id': vm.get('uuid')}) return guests def", "self.connection.xenapi.network.get_all() ret = [] for net in net_refs: ret.append({\"id\": net}) return ret def", "'HOST_IS_SLAVE' in str(error): self.poolinfo[\"api_server\"] = str(error).split(\"'\")[3] self.connect() else: raise error def logout(self): self.connection.xenapi.session.logout()", "Locaweb. 
from simplestack.utils import XenAPI from simplestack.exceptions import FeatureNotImplemented, EntityNotFound from simplestack.hypervisors.base import", "self._vm_ref(guest_id), False, False ) def guest_clone(self, guest_id, data): vm = self.connection.xenapi.VM.clone( self._vm_ref(guest_id), data[\"name\"]", "connect to master if 'HOST_IS_SLAVE' in str(error): self.poolinfo[\"api_server\"] = str(error).split(\"'\")[3] self.connect() else: raise", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "[] for net in net_refs: ret.append({\"id\": net}) return ret def network_info(self, net_ref): return", "> 0: storages.append({'id': sr[\"uuid\"]}) return storages def storage_info(self, storage_id): sr_ref = self.connection.xenapi.SR.get_by_uuid(storage_id) return", "in range(next_device): if device not in devices: next_device = device break vbd_rec =", "def snapshot_create(self, guest_id, snapshot_name=None): if not snapshot_name: snapshot_name = str(datetime.datetime.now()) snap = self.connection.xenapi.VM.snapshot(", "if free_space > max_free_space: max_free_space = free_space storage_ref = sr_ref if vm_size and", "force=False): if force: return self.connection.xenapi.VM.hard_shutdown( self._vm_ref(guest_id) ) else: return self.connection.xenapi.VM.clean_shutdown( self._vm_ref(guest_id) ) def", "guest_id): snaps = [ self._snapshot_info(s) for s in self.connection.xenapi.VM.get_snapshots( self._vm_ref(guest_id) ) ] return", "vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref) for vif_ref in vif_refs: vif_rec = self.connection.xenapi.VIF.get_record(vif_ref) if vif_rec[\"MAC\"] ==", "vif_rec.update(self.connection.xenapi.VIF.get_record(vif_ref)) network_rec = self.connection.xenapi.network.get_record( vif_rec[\"network\"] ) return( self.format_for.network_interface( vif_rec[\"MAC\"], vif_rec[\"device\"], vif_rec[\"MAC\"], network_rec[\"name_label\"], vif_rec[\"locking_mode\"],", "None, 'ipv6_allowed': None} 
vif_rec.update(self.connection.xenapi.VIF.get_record(vif_ref)) network_rec = self.connection.xenapi.network.get_record( vif_rec[\"network\"] ) return( self.format_for.network_interface( vif_rec[\"MAC\"], vif_rec[\"device\"],", "else: storages = self.connection.xenapi.SR.get_all_records() max_free_space = 0 for sr_ref, record in storages.iteritems(): free_space", "vm_rec in self.connection.xenapi.VM.get_all_records().values(): if not vm_rec['is_a_template'] and not vm_rec['is_a_snapshot']: used_memory += int(vm_rec['memory_dynamic_max']) total_memory", ") ) def host_list(self): hosts = [] for h in self.connection.xenapi.host.get_all_records().values(): hosts.append({'id': h[\"uuid\"]})", "def media_info(self, guest_id): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) iso_ref = self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] if", ") def _network_interface_info(self, vif_ref): vif_rec = {'locking_mode': None, 'ipv4_allowed': None, 'ipv6_allowed': None} vif_rec.update(self.connection.xenapi.VIF.get_record(vif_ref))", "vm_ref, guestdata[\"template\"] ) if \"paravirtualized\" in guestdata: if guestdata[\"paravirtualized\"]: if guestdata[\"paravirtualized\"] is True:", "return self._disk_info(disk_rec) def disk_update(self, guest_id, disk_id, data): vm_ref = self._vm_ref(guest_id) disk_rec = self._disk_rec(vm_ref,", "actually kBps rate = data[\"ratelimit\"] / (8 * 1024) self.connection.xenapi.VIF.set_qos_algorithm_type( vif_ref, \"ratelimit\" )", "def guest_shutdown(self, guest_id, force=False): if force: return self.connection.xenapi.VM.hard_shutdown( self._vm_ref(guest_id) ) else: return self.connection.xenapi.VM.clean_shutdown(", "vif_rec[\"ipv4_allowed\"], vif_rec[\"ipv6_allowed\"], vif_rec[\"qos_algorithm_params\"] ) ) def _delete_vm(self, vm_id): vm_ref = self._vm_ref(vm_id) if not", "1024), \"type\": \"system\", \"sharable\": False, \"read_only\": False, \"other_config\": {}, \"xenstore_data\": {}, \"sm_config\": {},", "in 
self.connection.xenapi.VM.get_VBDs(vm_ref): devices.append(int(self.connection.xenapi.VBD.get_userdevice(vbd))) next_device = max(devices) + 1 for device in range(next_device): if", "guestdata: parameters = self.connection.xenapi.VM.get_VCPUs_params(vm_ref) parameters.update(guestdata[\"vcpu_settings\"]) self.connection.xenapi.VM.set_VCPUs_params(vm_ref, parameters) if \"ha_enabled\" in guestdata: if guestdata[\"ha_enabled\"]:", "= self.connection._session master = self.poolinfo.get(\"api_server\") storage_ref = None if storage_id: storage_ref = self.connection.xenapi.SR.get_by_uuid(storage_id)", "EntityNotFound(\"NetworkInterface\", \"Unknown network: %s\" % name) return net_ref[0] def _network_get_pifs(self, name): ref =", "self.connection.xenapi.network.get_name_label(net_ref), \"bridge\": self.connection.xenapi.network.get_bridge(net_ref), \"name_description\": self.connection.xenapi.network.get_name_description(net_ref), \"other_config\": self.connection.xenapi.network.get_other_config(net_ref)} def _network_ref(self, name): net_ref = self.connection.xenapi.network.get_by_name_label(name)", "\"other_config\": {}, \"qos_algorithm_type\": \"\", \"qos_algorithm_params\": {} } if \"network\" in data: vif_record[\"network\"] =", "disk_rec[\"ref\"], data[\"name\"] ) if \"size\" in data: new_disk_size = int(data[\"size\"]) new_disk_size *= 1024", "if media_data.get(\"name\") and media_data[\"name\"] != \"\": self.media_unmount(guest_id) iso_ref = self.connection.xenapi.VDI.get_by_name_label( media_data[\"name\"] )[0] self.connection.xenapi.VBD.insert(cd_ref,", "tag_name) return self.tag_list(guest_id) def tag_delete(self, guest_id, tag_name): vm_ref = self._vm_ref(guest_id) self.connection.xenapi.VM.remove_tags(vm_ref, tag_name) def", "} vcpus_at_startup = str(vcpus[\"vcpus_at_startup\"]) vcpus_max = str(vcpus[\"vcpus_max\"]) if int(vcpus_at_startup) > int(vcpus_max): self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_at_startup)", 
"self.connection.xenapi.login_with_password( self.poolinfo.get(\"username\"), self.poolinfo.get(\"password\") ) except Exception, error: # If host is slave, connect", "break vbd_rec = { \"VM\": vm_ref, \"userdevice\": str(next_device), \"bootable\": False, \"mode\": \"RW\", \"type\":", "vm_id): vm_ref = self._vm_ref(vm_id) if not vm_ref: return for snap_ref in self.connection.xenapi.VM.get_snapshots(vm_ref): snap", "str(rate)} ) else: self.connection.xenapi.VIF.set_qos_algorithm_type(vif_ref, \"\") return self._network_interface_info(vif_ref) def network_interface_delete(self, guest_id, network_interface_id): vm_ref =", "* 1024 * 1024 self.connection.xenapi.VDI.resize(disk_rec[\"ref\"], str(new_disk_size)) disk_rec = self._disk_rec(vm_ref, disk_id) return self._disk_info(disk_rec) def", "session_ref = self.connection._session master = self.poolinfo.get(\"api_server\") storage_ref = None if storage_id: storage_ref =", "int(record[\"physical_size\"]) - int(record[\"virtual_allocation\"]) ) if free_space > max_free_space: max_free_space = free_space storage_ref =", "% name) return net_ref[0] def _network_get_pifs(self, name): ref = self._network_ref(name) return self.connection.xenapi.network.get_PIFs(ref) def", "1024 * 1024 * 1024 self.connection.xenapi.VDI.resize(disk_rec[\"ref\"], str(new_disk_size)) disk_rec = self._disk_rec(vm_ref, disk_id) return self._disk_info(disk_rec)", "next_device = device break vif_record = { \"VM\": vm_ref, \"device\": str(next_device), \"MAC_autogenerated\": True,", "0 for vm_rec in self.connection.xenapi.VM.get_all_records().values(): if not vm_rec['is_a_template'] and not vm_rec['is_a_snapshot']: used_memory +=", "in net_refs: ret.append({\"id\": net}) return ret def network_info(self, net_ref): return {\"name_label\": self.connection.xenapi.network.get_name_label(net_ref), \"bridge\":", "vcpus_at_startup) else: self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_max) self.connection.xenapi.VM.set_VCPUs_at_startup(vm_ref, 
vcpus_at_startup) if \"vcpus_number_live\" in guestdata: self.connection.xenapi.VM.set_VCPUs_number_live(vm_ref, str(guestdata[\"vcpus_number_live\"])) if", "new_attributes = {} if \"network\" in data: net_refs = self._network_ref(data[\"network\"]) if vif_record[\"network\"] !=", "= self.connection.xenapi.host.get_record(host_ref) return( self.format_for.host( host['uuid'], host['name_label'], host['address'] ) ) def _storage_info(self, sr_ref): sr", "to master if 'HOST_IS_SLAVE' in str(error): self.poolinfo[\"api_server\"] = str(error).split(\"'\")[3] self.connect() else: raise error", "if sr[\"PBDs\"] is not None and len(sr[\"PBDs\"]) > 0: storages.append({'id': sr[\"uuid\"]}) return storages", "storages = self.connection.xenapi.SR.get_all_records() max_free_space = 0 for sr_ref, record in storages.iteritems(): free_space =", "devices: next_device = device break vbd_rec = { \"VM\": vm_ref, \"userdevice\": str(next_device), \"bootable\":", "m_rec = self.connection.xenapi.host_metrics.get_record(met_ref) total_memory += int(m_rec['memory_total']) pool_rec = self.connection.xenapi.pool.get_all_records().values()[0] master_rec = self.connection.xenapi.host.get_record(pool_rec[\"master\"]) return", "Exception(\"No storage space left for importing\") task_ref = self.connection.xenapi.task.create( \"import vm\", \"import job\"", "* 1024), pool_rec[\"uuid\"], master_rec[\"address\"], { 'version': master_rec.get('software_version', {}).get('product_version') } ) ) def host_list(self):", "vif_rec[\"network\"] ) return( self.format_for.network_interface( vif_rec[\"MAC\"], vif_rec[\"device\"], vif_rec[\"MAC\"], network_rec[\"name_label\"], vif_rec[\"locking_mode\"], vif_rec[\"ipv4_allowed\"], vif_rec[\"ipv6_allowed\"], vif_rec[\"qos_algorithm_params\"] )", "self.connection.xenapi.VIF.destroy(vif_ref) def snapshot_list(self, guest_id): snaps = [ self._snapshot_info(s) for s in self.connection.xenapi.VM.get_snapshots( self._vm_ref(guest_id)", "[self._network_interface_info(n) 
for n in vif_refs] def network_interface_create(self, guest_id, data): \"\"\" Data should contain", "self.connection.xenapi.VM.get_tags(self._vm_ref(guest_id)) def tag_create(self, guest_id, tag_name): vm_ref = self._vm_ref(guest_id) self.connection.xenapi.VM.add_tags(vm_ref, tag_name) return self.tag_list(guest_id) def", "next_device) return self._disk_info(disk_rec) def disk_info(self, guest_id, disk_id): vm_ref = self._vm_ref(guest_id) disk_rec = self._disk_rec(vm_ref,", "*= 1024 * 1024 * 1024 self.connection.xenapi.VDI.resize(disk_rec[\"ref\"], str(new_disk_size)) disk_rec = self._disk_rec(vm_ref, disk_id) return", "range(next_device): if device not in devices: next_device = device break vif_record = {", "in vm['VBDs']: vbd = self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"] == \"CD\": return vbd_ref def _delete_disks(self,", "= self._vm_ref(vm_id) if not vm_ref: return for snap_ref in self.connection.xenapi.VM.get_snapshots(vm_ref): snap = self.connection.xenapi.VM.get_record(snap_ref)", "disk_id): vm_ref = self._vm_ref(guest_id) disk_rec = self._disk_rec(vm_ref, disk_id) return self._disk_info(disk_rec) def disk_update(self, guest_id,", "* 1024 * 1024 * 1024 new_disk_size = hdd - disks_size + int(disk[\"virtual_size\"])", "in data and vif_record[\"ipv4_allowed\"] != data[\"ipv4_allowed\"]: new_attributes[\"ipv4_allowed\"] = data[\"ipv4_allowed\"] if \"ipv6_allowed\" in data", "if \"active\" in data: if data[\"active\"]: try: self.connection.xenapi.VIF.plug(vif_ref) except: pass else: try: self.connection.xenapi.VIF.unplug(vif_ref)", "device not in devices: next_device = device break vif_record = { \"VM\": vm_ref,", "# All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0", "guest_id, snapshot_id): self.connection.xenapi.VM.revert(self._vm_ref(snapshot_id)) def snapshot_delete(self, guest_id, snapshot_id): self._delete_vm(snapshot_id) def tag_list(self, guest_id): return self.connection.xenapi.VM.get_tags(self._vm_ref(guest_id))", "class Stack(SimpleStack): state_translation = { \"Running\": \"STARTED\", \"Halted\": \"STOPPED\", \"Suspended\": \"PAUSED\" } def", "conn = httplib.HTTPConnection(master) conn.request(\"GET\", path) response = conn.getresponse() response_size = response.getheader(\"Content-Length\") return (response,", "in devices: next_device = device break vbd_rec = { \"VM\": vm_ref, \"userdevice\": str(next_device),", "free_space > max_free_space: max_free_space = free_space storage_ref = sr_ref if vm_size and vm_size", "err.errno == errno.ECONNRESET: LOG.warning(\"error=CONNRESET action=import message='BUG?'\") else: raise task_rec = self.connection.xenapi.task.get_record(task_ref) vm_ref =", "self.connection.xenapi.VM.set_HVM_boot_policy( vm_ref, \"BIOS order\" ) if \"hdd\" in guestdata: disk = self.get_disks(vm_ref)[-1] disks_size", "* 1024 * 1024), int(sr['physical_size']) / (1024 * 1024 * 1024) ) )", ") response = conn.getresponse() response.status response.read() except socket.error as err: if err.errno ==", "%s\" % guest_id, \"export job\" ) path = \"/export?session_id=%s&task_id=%s&ref=%s\" % ( session_ref, task_ref,", "self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] if iso_ref == 'OpaqueRef:NULL': return {\"name\": None} else: name = self.connection.xenapi.VDI.get_record(iso_ref)[\"name_label\"] return", "guestdata[\"paravirtualized\"] self.connection.xenapi.VM.set_HVM_boot_policy(vm_ref, \"\") self.connection.xenapi.VM.set_PV_args(vm_ref, pv_args) else: self.connection.xenapi.VM.set_PV_args(vm_ref, \"\") self.connection.xenapi.VM.set_HVM_boot_params( vm_ref, {\"order\": \"dc\"} )", "* 1024 self.connection.xenapi.VDI.resize(disk_rec[\"ref\"], 
str(new_disk_size)) disk_rec = self._disk_rec(vm_ref, disk_id) return self._disk_info(disk_rec) def media_mount(self, guest_id,", "self.get_disks(vm_ref)[-1] disks_size = self.get_disks_size(vm_ref) hdd = guestdata.get(\"hdd\") * 1024 * 1024 * 1024", "data[\"name\"] ) if \"size\" in data: new_disk_size = int(data[\"size\"]) new_disk_size *= 1024 *", "for vm in self.connection.xenapi.VM.get_all_records().values(): if (not vm.get('is_a_snapshot')) and (not vm.get('is_a_template')): guests.append({'id': vm.get('uuid')}) return", "= self.connection.xenapi.VM.get_VIFs(vm_ref) return [self._network_interface_info(n) for n in vif_refs] def network_interface_create(self, guest_id, data): \"\"\"", "vdi_rec[\"name_label\"] = data[\"name\"] vdi_rec[\"name_description\"] = data[\"name\"] vdi_ref = self.connection.xenapi.VDI.create(vdi_rec) vbd_rec[\"VDI\"] = vdi_ref self.connection.xenapi.VBD.create(vbd_rec)", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "= self.connection.xenapi.pool.create_VLAN_from_PIF(pif_ref[0], net_ref, str(vlan)) return net_ref def network_interface_list(self, guest_id): vm_ref = self._vm_ref(guest_id) vif_refs", "if \"name\" in data: self.connection.xenapi.VDI.set_name_label( disk_rec[\"ref\"], data[\"name\"] ) self.connection.xenapi.VDI.set_name_description( disk_rec[\"ref\"], data[\"name\"] ) if", "(PotHix), Locaweb. 
from simplestack.utils import XenAPI from simplestack.exceptions import FeatureNotImplemented, EntityNotFound from simplestack.hypervisors.base", "disk\", \"virtual_size\": str(data[\"size\"] * 1024 * 1024 * 1024), \"type\": \"system\", \"sharable\": False,", "= self.connection.xenapi.VM_guest_metrics.\\ get_record(vm[\"guest_metrics\"]) tools_up_to_date = guest_metrics[\"PV_drivers_up_to_date\"] if \"0/ip\" in guest_metrics[\"networks\"].keys(): ip = guest_metrics[\"networks\"][\"0/ip\"]", "self._vm_ref(guest_id) disks = self.get_disks(vm_ref) return [self._disk_info(d) for d in disks] def disk_create(self, guest_id,", "max_free_space: raise Exception(\"No storage space left for importing\") task_ref = self.connection.xenapi.task.create( \"import vm\",", "if vm_size and vm_size > 0 and vm_size > max_free_space: raise Exception(\"No storage", "self.connection.xenapi.VM.clean_shutdown( self._vm_ref(guest_id) ) def guest_start(self, guest_id): return self.connection.xenapi.VM.start( self._vm_ref(guest_id), False, False ) def", "({ \"name_label\": \"New Disk\", \"name_description\": \"Simplestack generated disk\", \"virtual_size\": str(data[\"size\"] * 1024 *", "= self._disk_rec(vm_ref, disk_id) return self._disk_info(disk_rec) def media_mount(self, guest_id, media_data): vm_ref = self._vm_ref(guest_id) cd_ref", "1024), int(sr['physical_size']) / (1024 * 1024 * 1024) ) ) def _vm_info(self, vm_ref):", "Stack(SimpleStack): state_translation = { \"Running\": \"STARTED\", \"Halted\": \"STOPPED\", \"Suspended\": \"PAUSED\" } def __init__(self,", "return storages def storage_info(self, storage_id): sr_ref = self.connection.xenapi.SR.get_by_uuid(storage_id) return self._storage_info(sr_ref) def guest_list(self): guests", "ref = self.connection.xenapi.pool.create_VLAN_from_PIF(pif_ref[0], net_ref, str(vlan)) return net_ref def network_interface_list(self, guest_id): vm_ref = self._vm_ref(guest_id)", "self._vm_ref(guest_id) return self._vm_info(vm) def guest_shutdown(self, 
guest_id, force=False): if force: return self.connection.xenapi.VM.hard_shutdown( self._vm_ref(guest_id) )", "return net_ref[0] def _network_get_pifs(self, name): ref = self._network_ref(name) return self.connection.xenapi.network.get_PIFs(ref) def _network_create(self, name,", "vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) iso_ref = self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] if iso_ref == 'OpaqueRef:NULL':", "at least a network key: {\"network\": \"THE NETWORK NAME\"} \"\"\" vm_ref = self._vm_ref(guest_id)", "return self._network_interface_info(vif_ref) def network_interface_info(self, guest_id, network_interface_id): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id)", "used_memory += int(vm_rec['memory_dynamic_max']) total_memory = 0 for host_ref in self.connection.xenapi.host.get_all(): met_ref = self.connection.xenapi.host.get_metrics(host_ref)", "def guest_delete(self, guest_id): self._delete_vm(guest_id) def guest_import(self, vm_stream, vm_size, storage_id=None): session_ref = self.connection._session master", "master master = self.poolinfo.get(\"api_server\") task_ref = self.connection.xenapi.task.create( \"export vm %s\" % guest_id, \"export", "snapshot_list(self, guest_id): snaps = [ self._snapshot_info(s) for s in self.connection.xenapi.VM.get_snapshots( self._vm_ref(guest_id) ) ]", "\"vcpu_settings\" in guestdata: parameters = self.connection.xenapi.VM.get_VCPUs_params(vm_ref) parameters.update(guestdata[\"vcpu_settings\"]) self.connection.xenapi.VM.set_VCPUs_params(vm_ref, parameters) if \"ha_enabled\" in guestdata:", "See the License for the specific language governing permissions and # limitations under", "self._disk_rec(vm_ref, next_device) return self._disk_info(disk_rec) def disk_info(self, guest_id, disk_id): vm_ref = self._vm_ref(guest_id) disk_rec =", "and vif_record[\"locking_mode\"] != data[\"locking_mode\"]: new_attributes[\"locking_mode\"] = data[\"locking_mode\"] 
if \"ipv4_allowed\" in data and vif_record[\"ipv4_allowed\"]", "int(data[\"size\"]) new_disk_size *= 1024 * 1024 * 1024 self.connection.xenapi.VDI.resize(disk_rec[\"ref\"], str(new_disk_size)) disk_rec = self._disk_rec(vm_ref,", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "vm_ref): vm = self.connection.xenapi.VM.get_record(vm_ref) tools_up_to_date = None ip = None if vm[\"guest_metrics\"] !=", "not vm_rec['is_a_snapshot']: used_memory += int(vm_rec['memory_dynamic_max']) total_memory = 0 for host_ref in self.connection.xenapi.host.get_all(): met_ref", "self.connection.xenapi.VIF.unplug(vif_ref) except: pass self.connection.xenapi.VIF.destroy(vif_ref) vif_ref = self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref) except: pass if \"active\"", "vif_ref, {\"kbps\": str(rate)} ) else: self.connection.xenapi.VIF.set_qos_algorithm_type(vif_ref, \"\") return self._network_interface_info(vif_ref) def network_interface_delete(self, guest_id, network_interface_id):", "\"0\", \"other_config\": {}, \"qos_algorithm_type\": \"\", \"qos_algorithm_params\": {} } if \"network\" in data: vif_record[\"network\"]", "self.connection.xenapi.VBD.insert(cd_ref, iso_ref) else: self.media_unmount(guest_id) def media_unmount(self, guest_id): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref)", "is slave, connect to master if 'HOST_IS_SLAVE' in str(error): self.poolinfo[\"api_server\"] = str(error).split(\"'\")[3] self.connect()", "net}) return ret def network_info(self, net_ref): return {\"name_label\": self.connection.xenapi.network.get_name_label(net_ref), \"bridge\": self.connection.xenapi.network.get_bridge(net_ref), \"name_description\": self.connection.xenapi.network.get_name_description(net_ref),", "self._vm_ref(guest_id), snapshot_name ) return self._snapshot_info(snap) def snapshot_info(self, guest_id, snapshot_id): snap = self._vm_ref(snapshot_id) return", "guest_reboot(self, 
guest_id, force=False): vm_ref = self._vm_ref(guest_id) if force: return self.connection.xenapi.VM.hard_reboot(vm_ref) else: return self.connection.xenapi.VM.clean_reboot(vm_ref)", "guestdata[\"template\"] ^ is_template: self.connection.xenapi.VM.set_is_a_template( vm_ref, guestdata[\"template\"] ) if \"paravirtualized\" in guestdata: if guestdata[\"paravirtualized\"]:", "guest_metrics = self.connection.xenapi.VM_guest_metrics.\\ get_record(vm[\"guest_metrics\"]) tools_up_to_date = guest_metrics[\"PV_drivers_up_to_date\"] if \"0/ip\" in guest_metrics[\"networks\"].keys(): ip =", "net_refs: ret.append({\"id\": net}) return ret def network_info(self, net_ref): return {\"name_label\": self.connection.xenapi.network.get_name_label(net_ref), \"bridge\": self.connection.xenapi.network.get_bridge(net_ref),", "data and vif_record[\"ipv4_allowed\"] != data[\"ipv4_allowed\"]: new_attributes[\"ipv4_allowed\"] = data[\"ipv4_allowed\"] if \"ipv6_allowed\" in data and", "vif_refs] def network_interface_create(self, guest_id, data): \"\"\" Data should contain at least a network", "^ is_template: self.connection.xenapi.VM.set_is_a_template( vm_ref, guestdata[\"template\"] ) if \"paravirtualized\" in guestdata: if guestdata[\"paravirtualized\"]: if", "self._host_info(host_ref) def storage_list(self): storages = [] for sr in self.connection.xenapi.SR.get_all_records().values(): if sr[\"PBDs\"] is", "simplestack.presenters.formatter import Formatter import re import errno import socket import httplib import logging", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "\"bridge\": self.connection.xenapi.network.get_bridge(net_ref), \"name_description\": self.connection.xenapi.network.get_name_description(net_ref), \"other_config\": self.connection.xenapi.network.get_other_config(net_ref)} def _network_ref(self, name): net_ref = self.connection.xenapi.network.get_by_name_label(name) if", "return net_ref def network_interface_list(self, 
guest_id): vm_ref = self._vm_ref(guest_id) vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref) return [self._network_interface_info(n)", "= { \"Running\": \"STARTED\", \"Halted\": \"STOPPED\", \"Suspended\": \"PAUSED\" } def __init__(self, poolinfo): self.connection", "if 'HOST_IS_SLAVE' in str(error): self.poolinfo[\"api_server\"] = str(error).split(\"'\")[3] self.connect() else: raise error def logout(self):", "self.media_unmount(guest_id) def media_unmount(self, guest_id): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) null_ref = 'OpaqueRef:NULL'", "network: %s\" % name) return net_ref[0] def _network_get_pifs(self, name): ref = self._network_ref(name) return", "master_rec = self.connection.xenapi.host.get_record(pool_rec[\"master\"]) return ( self.format_for.pool( used_memory / (1024 * 1024), total_memory /", "net_ref = self.connection.xenapi.network.get_by_name_label(name) if len(net_ref) == 0: raise EntityNotFound(\"NetworkInterface\", \"Unknown network: %s\" %", "} if \"network\" in data: vif_record[\"network\"] = self._network_ref(data[\"network\"]) vif_ref = self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref)", "for device in range(next_device): if device not in devices: next_device = device break", "self.connection.xenapi.VM.set_HVM_boot_params( vm_ref, {\"order\": \"dc\"} ) self.connection.xenapi.VM.set_HVM_boot_policy( vm_ref, \"BIOS order\" ) if \"hdd\" in", "!= data[\"ipv4_allowed\"]: new_attributes[\"ipv4_allowed\"] = data[\"ipv4_allowed\"] if \"ipv6_allowed\" in data and vif_record[\"ipv6_allowed\"] != data[\"ipv6_allowed\"]:", "@author: <NAME> (PotHix), Locaweb. 
from simplestack.utils import XenAPI from simplestack.exceptions import FeatureNotImplemented, EntityNotFound", "network_interface_id) try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass self.connection.xenapi.VIF.destroy(vif_ref) def snapshot_list(self, guest_id): snaps = [ self._snapshot_info(s)", "vif in self.connection.xenapi.VM.get_VIFs(vm_ref): devices.append(int(self.connection.xenapi.VIF.get_device(vif))) next_device = max(devices) + 1 for device in range(next_device):", "guest_id): vm_ref = self._vm_ref(guest_id) vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref) return [self._network_interface_info(n) for n in vif_refs]", "return self.connection.xenapi.VM.get_by_uuid(uuid) except: LOG.warning(\"uuid=%s action=not_found\" % uuid) return None def _host_info(self, host_ref): host", "a network key: {\"network\": \"THE NETWORK NAME\"} \"\"\" vm_ref = self._vm_ref(guest_id) devices =", "guest_shutdown(self, guest_id, force=False): if force: return self.connection.xenapi.VM.hard_shutdown( self._vm_ref(guest_id) ) else: return self.connection.xenapi.VM.clean_shutdown( self._vm_ref(guest_id)", "= self.connection.xenapi.SR.get_by_uuid(storage_id) return self._storage_info(sr_ref) def guest_list(self): guests = [] for vm in self.connection.xenapi.VM.get_all_records().values():", "pass if \"ratelimit\" in data: if data[\"ratelimit\"]: # kbps in xen is actually", "vm_ref): size = 0 for vdi in self.get_disks(vm_ref): size += int(vdi[\"virtual_size\"]) return size", "<NAME>, Locaweb. # @author: <NAME> (morellon), Locaweb. # @author: <NAME> (PotHix), Locaweb. 
from", "= { \"vcpus_at_startup\" : vcpus, \"vcpus_max\" : self.connection.xenapi.VM.get_VCPUs_max(vm_ref) } vcpus_at_startup = str(vcpus[\"vcpus_at_startup\"]) vcpus_max", "!= \"\": self.media_unmount(guest_id) iso_ref = self.connection.xenapi.VDI.get_by_name_label( media_data[\"name\"] )[0] self.connection.xenapi.VBD.insert(cd_ref, iso_ref) else: self.media_unmount(guest_id) def", "guest_id): vm = self._vm_ref(guest_id) return self._vm_info(vm) def guest_shutdown(self, guest_id, force=False): if force: return", "size = 0 for vdi in self.get_disks(vm_ref): size += int(vdi[\"virtual_size\"]) return size def", "[] for vbd in self.connection.xenapi.VM.get_VBDs(vm_ref): devices.append(int(self.connection.xenapi.VBD.get_userdevice(vbd))) next_device = max(devices) + 1 for device", "= None ip = None if vm[\"guest_metrics\"] != \"OpaqueRef:NULL\": guest_metrics = self.connection.xenapi.VM_guest_metrics.\\ get_record(vm[\"guest_metrics\"])", "\"paravirtualized\" in guestdata: if guestdata[\"paravirtualized\"]: if guestdata[\"paravirtualized\"] is True: pv_args = \"-- quiet", "snapshot.get('name_label') ) ) def _network_interface_info(self, vif_ref): vif_rec = {'locking_mode': None, 'ipv4_allowed': None, 'ipv6_allowed':", "\"name\" in data: self.connection.xenapi.VDI.set_name_label( disk_rec[\"ref\"], data[\"name\"] ) self.connection.xenapi.VDI.set_name_description( disk_rec[\"ref\"], data[\"name\"] ) if \"size\"", "for h in self.connection.xenapi.host.get_all_records().values(): hosts.append({'id': h[\"uuid\"]}) return hosts def host_info(self, host_id): host_ref =", "False, \"read_only\": False, \"other_config\": {}, \"xenstore_data\": {}, \"sm_config\": {}, \"tags\": [] }) if", "in vm['VBDs']: vbd = self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"] == \"Disk\": vdi = self.connection.xenapi.VDI.get_record(vbd['VDI']) vdi['userdevice']", "self.connection.xenapi.network.get_other_config(net_ref)} def _network_ref(self, name): net_ref = 
self.connection.xenapi.network.get_by_name_label(name) if len(net_ref) == 0: raise EntityNotFound(\"NetworkInterface\",", "= self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"] == \"CD\": return vbd_ref def _delete_disks(self, vm_ref): for vdi", "max(devices) + 1 for device in range(next_device): if device not in devices: next_device", "self._vm_info(vm) def guest_shutdown(self, guest_id, force=False): if force: return self.connection.xenapi.VM.hard_shutdown( self._vm_ref(guest_id) ) else: return", "def network_list(self): net_refs = self.connection.xenapi.network.get_all() ret = [] for net in net_refs: ret.append({\"id\":", "guest_id, snapshot_id): self._delete_vm(snapshot_id) def tag_list(self, guest_id): return self.connection.xenapi.VM.get_tags(self._vm_ref(guest_id)) def tag_create(self, guest_id, tag_name): vm_ref", "def _network_interface_ref(self, vm_ref, network_interface_id): vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref) for vif_ref in vif_refs: vif_rec =", "vdi_rec = ({ \"name_label\": \"New Disk\", \"name_description\": \"Simplestack generated disk\", \"virtual_size\": str(data[\"size\"] *", "!= net_refs: new_attributes[\"network\"] = net_refs if \"locking_mode\" in data and vif_record[\"locking_mode\"] != data[\"locking_mode\"]:", "memory, \"memory_static_max\" : memory } memory_target = str(int(memory[\"memory_target\"])<<20) memory_static_min = str(int(memory[\"memory_static_min\"])<<20) memory_static_max =", "\"qos_algorithm_params\": {} } vdi_rec = ({ \"name_label\": \"New Disk\", \"name_description\": \"Simplestack generated disk\",", "and (not vm.get('is_a_template')): guests.append({'id': vm.get('uuid')}) return guests def guest_info(self, guest_id): vm = self._vm_ref(guest_id)", "KIND, either express or implied. 
# See the License for the specific language", "vbd in self.connection.xenapi.VM.get_VBDs(vm_ref): devices.append(int(self.connection.xenapi.VBD.get_userdevice(vbd))) next_device = max(devices) + 1 for device in range(next_device):", "self._vm_ref(guest_id) disk_rec = self._disk_rec(vm_ref, disk_id) return self._disk_info(disk_rec) def disk_update(self, guest_id, disk_id, data): vm_ref", "conn = httplib.HTTPConnection(master) conn.request( \"PUT\", path, vm_stream, {\"Content-Length\": vm_size} ) response = conn.getresponse()", "vdi['userdevice'] = vbd['userdevice'] vdi['ref'] = vbd['VDI'] disks.append(vdi) return sorted(disks, key=lambda vdi: int(vdi['userdevice'])) def", "return vif_ref entity_info = \"%s - on Guest\" % (network_interface_id) raise EntityNotFound(\"NetworkInterface\", entity_info)", "guest_id): return self.connection.xenapi.VM.start( self._vm_ref(guest_id), False, False ) def guest_reboot(self, guest_id, force=False): vm_ref =", "else: raise task_rec = self.connection.xenapi.task.get_record(task_ref) vm_ref = re.sub(r'<.*?>', \"\", task_rec[\"result\"]) self.connection.xenapi.task.destroy(task_ref) return self._vm_info(vm_ref)", "vif_record[\"locking_mode\"] != data[\"locking_mode\"]: new_attributes[\"locking_mode\"] = data[\"locking_mode\"] if \"ipv4_allowed\" in data and vif_record[\"ipv4_allowed\"] !=", "\"network\" in data: vif_record[\"network\"] = self._network_ref(data[\"network\"]) vif_ref = self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref) except: pass", "= self._vm_ref(guest_id) return self._vm_info(vm) def guest_shutdown(self, guest_id, force=False): if force: return self.connection.xenapi.VM.hard_shutdown( self._vm_ref(guest_id)", "data): \"\"\" Data should contain at least a network key: {\"network\": \"THE NETWORK", "self._vm_ref(guest_id), data[\"name\"] ) return self._vm_info(vm) def guest_update(self, guest_id, guestdata): vm_ref = self._vm_ref(guest_id) if", "if \"0/ip\" in 
guest_metrics[\"networks\"].keys(): ip = guest_metrics[\"networks\"][\"0/ip\"] host = None if vm[\"resident_on\"] !=", "def storage_info(self, storage_id): sr_ref = self.connection.xenapi.SR.get_by_uuid(storage_id) return self._storage_info(sr_ref) def guest_list(self): guests = []", "if not vm_ref: return for snap_ref in self.connection.xenapi.VM.get_snapshots(vm_ref): snap = self.connection.xenapi.VM.get_record(snap_ref) self._delete_vm(snap[\"uuid\"]) self._delete_disks(vm_ref)", "1024 * 1024), disk_rec.get(\"uuid\") ) ) def _snapshot_info(self, snapshot_ref): snapshot = self.connection.xenapi.VM.get_record(snapshot_ref) return(", "ANY KIND, either express or implied. # See the License for the specific", "Formatter() self.connect() def connect(self): self.connection = XenAPI.Session( \"https://%s/\" % self.poolinfo.get(\"api_server\") ) try: self.connection.xenapi.login_with_password(", "vdi in self.get_disks(vm_ref): size += int(vdi[\"virtual_size\"]) return size def _disk_rec(self, vm_ref, disk_id): disk_id", "guest_id): vm_ref = self._vm_ref(guest_id) session_ref = self.connection._session # FIXME: get real master master", "* 1024), int(sr['virtual_allocation']) / (1024 * 1024 * 1024), int(sr['physical_size']) / (1024 *", "return self._snapshot_info(snap) def snapshot_info(self, guest_id, snapshot_id): snap = self._vm_ref(snapshot_id) return self._snapshot_info(snap) def snapshot_revert(self,", "1024 * 1024), \"type\": \"system\", \"sharable\": False, \"read_only\": False, \"other_config\": {}, \"xenstore_data\": {},", "from simplestack.utils import XenAPI from simplestack.exceptions import FeatureNotImplemented, EntityNotFound from simplestack.hypervisors.base import SimpleStack", "net_ref, str(vlan)) return net_ref def network_interface_list(self, guest_id): vm_ref = self._vm_ref(guest_id) vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref)", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "return self._host_info(host_ref) def storage_list(self): storages = [] for sr in self.connection.xenapi.SR.get_all_records().values(): if sr[\"PBDs\"]", "key=lambda vdi: int(vdi['userdevice'])) def get_disks_size(self, vm_ref): size = 0 for vdi in self.get_disks(vm_ref):", "vm_ref = self._vm_ref(guest_id) vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref) return [self._network_interface_info(n) for n in vif_refs] def", "\"MTU\": \"0\", \"other_config\": {}, \"qos_algorithm_type\": \"\", \"qos_algorithm_params\": {} } if \"network\" in data:", "storage_id): sr_ref = self.connection.xenapi.SR.get_by_uuid(storage_id) return self._storage_info(sr_ref) def guest_list(self): guests = [] for vm", "in vif_refs] def network_interface_create(self, guest_id, data): \"\"\" Data should contain at least a", "in guestdata: disk = self.get_disks(vm_ref)[-1] disks_size = self.get_disks_size(vm_ref) hdd = guestdata.get(\"hdd\") * 1024", "else: try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass if \"ratelimit\" in data: if data[\"ratelimit\"]: # kbps", "self.connection.xenapi.VM.hard_shutdown( self._vm_ref(guest_id) ) else: return self.connection.xenapi.VM.clean_shutdown( self._vm_ref(guest_id) ) def guest_start(self, guest_id): return self.connection.xenapi.VM.start(", "self._cd_ref(vm_ref) null_ref = 'OpaqueRef:NULL' if self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] != null_ref: self.connection.xenapi.VBD.eject(cd_ref) def media_info(self, guest_id): vm_ref", "vif_rec[\"MAC\"], network_rec[\"name_label\"], vif_rec[\"locking_mode\"], vif_rec[\"ipv4_allowed\"], vif_rec[\"ipv6_allowed\"], vif_rec[\"qos_algorithm_params\"] ) ) def _delete_vm(self, vm_id): vm_ref =", "def storage_list(self): storages = [] for sr in self.connection.xenapi.SR.get_all_records().values(): if sr[\"PBDs\"] is not", "\"VM\": vm_ref, \"device\": str(next_device), \"MAC_autogenerated\": True, \"MAC\": \"\", \"MTU\": \"0\", \"other_config\": {}, \"qos_algorithm_type\":", "raise 
EntityNotFound(\"NetworkInterface\", \"Unknown network: %s\" % name) return net_ref[0] def _network_get_pifs(self, name): ref", "new_attributes[\"ipv4_allowed\"] = data[\"ipv4_allowed\"] if \"ipv6_allowed\" in data and vif_record[\"ipv6_allowed\"] != data[\"ipv6_allowed\"]: new_attributes[\"ipv6_allowed\"] =", "vdi['ref'] = vbd['VDI'] disks.append(vdi) return sorted(disks, key=lambda vdi: int(vdi['userdevice'])) def get_disks_size(self, vm_ref): size", "guest_id, data): vm = self.connection.xenapi.VM.clone( self._vm_ref(guest_id), data[\"name\"] ) return self._vm_info(vm) def guest_update(self, guest_id,", "(response, response_size) def disk_list(self, guest_id): vm_ref = self._vm_ref(guest_id) disks = self.get_disks(vm_ref) return [self._disk_info(d)", "import SimpleStack from simplestack.presenters.formatter import Formatter import re import errno import socket import", "vif_ref = self._network_interface_ref(vm_ref, network_interface_id) vif_record = self.connection.xenapi.VIF.get_record(vif_ref) new_attributes = {} if \"network\" in", "= self.connection.xenapi.VM.get_VCPUs_params(vm_ref) parameters.update(guestdata[\"vcpu_settings\"]) self.connection.xenapi.VM.set_VCPUs_params(vm_ref, parameters) if \"ha_enabled\" in guestdata: if guestdata[\"ha_enabled\"]: self.connection.xenapi.VM.set_ha_restart_priority( vm_ref,", "host_ref in self.connection.xenapi.host.get_all(): met_ref = self.connection.xenapi.host.get_metrics(host_ref) m_rec = self.connection.xenapi.host_metrics.get_record(met_ref) total_memory += int(m_rec['memory_total']) pool_rec", "error: # If host is slave, connect to master if 'HOST_IS_SLAVE' in str(error):", "new_disk_size = int(data[\"size\"]) new_disk_size *= 1024 * 1024 * 1024 self.connection.xenapi.VDI.resize(disk_rec[\"ref\"], str(new_disk_size)) disk_rec", "= self._vm_ref(guest_id) devices = [] for vbd in self.connection.xenapi.VM.get_VBDs(vm_ref): devices.append(int(self.connection.xenapi.VBD.get_userdevice(vbd))) next_device = 
max(devices)", "= self._vm_ref(guest_id) devices = [] for vif in self.connection.xenapi.VM.get_VIFs(vm_ref): devices.append(int(self.connection.xenapi.VIF.get_device(vif))) next_device = max(devices)", "vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) null_ref = 'OpaqueRef:NULL' if self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] != null_ref:", "is_template = self.connection.xenapi.VM.get_is_a_template(vm_ref) if guestdata[\"template\"] ^ is_template: self.connection.xenapi.VM.set_is_a_template( vm_ref, guestdata[\"template\"] ) if \"paravirtualized\"", ": self.connection.xenapi.VM.get_VCPUs_max(vm_ref) } vcpus_at_startup = str(vcpus[\"vcpus_at_startup\"]) vcpus_max = str(vcpus[\"vcpus_max\"]) if int(vcpus_at_startup) > int(vcpus_max):", "order\" ) if \"hdd\" in guestdata: disk = self.get_disks(vm_ref)[-1] disks_size = self.get_disks_size(vm_ref) hdd", "else: pv_args = guestdata[\"paravirtualized\"] self.connection.xenapi.VM.set_HVM_boot_policy(vm_ref, \"\") self.connection.xenapi.VM.set_PV_args(vm_ref, pv_args) else: self.connection.xenapi.VM.set_PV_args(vm_ref, \"\") self.connection.xenapi.VM.set_HVM_boot_params( vm_ref,", "= { \"VM\": vm_ref, \"userdevice\": str(next_device), \"bootable\": False, \"mode\": \"RW\", \"type\": \"Disk\", \"unpluggable\":", "} def __init__(self, poolinfo): self.connection = False self.poolinfo = poolinfo self.format_for = Formatter()", "size += int(vdi[\"virtual_size\"]) return size def _disk_rec(self, vm_ref, disk_id): disk_id = str(disk_id) for", "net_refs = self._network_ref(data[\"network\"]) if vif_record[\"network\"] != net_refs: new_attributes[\"network\"] = net_refs if \"locking_mode\" in", "if device not in devices: next_device = device break vbd_rec = { \"VM\":", "All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the", "def _vm_ref(self, uuid): try: return self.connection.xenapi.VM.get_by_uuid(uuid) except: LOG.warning(\"uuid=%s action=not_found\" % uuid) return None", "(1024 * 1024), total_memory / (1024 * 1024), pool_rec[\"uuid\"], master_rec[\"address\"], { 'version': master_rec.get('software_version',", "vif_rec[\"device\"], vif_rec[\"MAC\"], network_rec[\"name_label\"], vif_rec[\"locking_mode\"], vif_rec[\"ipv4_allowed\"], vif_rec[\"ipv6_allowed\"], vif_rec[\"qos_algorithm_params\"] ) ) def _delete_vm(self, vm_id): vm_ref", "= [] for vm in self.connection.xenapi.VM.get_all_records().values(): if (not vm.get('is_a_snapshot')) and (not vm.get('is_a_template')): guests.append({'id':", "{}).get('product_version') } ) ) def host_list(self): hosts = [] for h in self.connection.xenapi.host.get_all_records().values():", "!= data[\"ipv6_allowed\"]: new_attributes[\"ipv6_allowed\"] = data[\"ipv6_allowed\"] if len(new_attributes) != 0: vif_record.update(new_attributes) try: self.connection.xenapi.VIF.unplug(vif_ref) except:", "\"\", \"qos_algorithm_params\": {} } vdi_rec = ({ \"name_label\": \"New Disk\", \"name_description\": \"Simplestack generated", "disk entity_info = \"%s - on Guest\" % (disk_id) raise EntityNotFound(\"Disk\", entity_info) def", "int(vm.get('VCPUs_at_startup')), int(vm.get('memory_static_max')) / (1024 * 1024), self.get_disks_size(vm_ref) / (1024 * 1024 * 1024),", "= guest_metrics[\"PV_drivers_up_to_date\"] if \"0/ip\" in guest_metrics[\"networks\"].keys(): ip = guest_metrics[\"networks\"][\"0/ip\"] host = None if", "vif_ref in vif_refs: vif_rec = self.connection.xenapi.VIF.get_record(vif_ref) if vif_rec[\"MAC\"] == network_interface_id: return vif_ref entity_info", "disk_rec.get('userdevice'), disk_rec.get('name_label'), disk_rec.get('userdevice'), int(disk_rec.get('virtual_size')) / (1024 * 1024 * 1024), disk_rec.get(\"uuid\") ) )", "data[\"ratelimit\"]: # kbps in xen is actually kBps rate = 
data[\"ratelimit\"] / (8", "= self.connection.xenapi.host.get_by_uuid(host_id) return self._host_info(host_ref) def storage_list(self): storages = [] for sr in self.connection.xenapi.SR.get_all_records().values():", "None if vm[\"guest_metrics\"] != \"OpaqueRef:NULL\": guest_metrics = self.connection.xenapi.VM_guest_metrics.\\ get_record(vm[\"guest_metrics\"]) tools_up_to_date = guest_metrics[\"PV_drivers_up_to_date\"] if", "host_list(self): hosts = [] for h in self.connection.xenapi.host.get_all_records().values(): hosts.append({'id': h[\"uuid\"]}) return hosts def", "return self._storage_info(sr_ref) def guest_list(self): guests = [] for vm in self.connection.xenapi.VM.get_all_records().values(): if (not", "vm_ref, \"device\": str(next_device), \"MAC_autogenerated\": True, \"MAC\": \"\", \"MTU\": \"0\", \"other_config\": {}, \"qos_algorithm_type\": \"\",", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "re.sub(r'<.*?>', \"\", task_rec[\"result\"]) self.connection.xenapi.task.destroy(task_ref) return self._vm_info(vm_ref) def guest_export(self, guest_id): vm_ref = self._vm_ref(guest_id) session_ref", "under the License. # # @author: <NAME>, Locaweb. 
# @author: <NAME> (morellon), Locaweb.", "job\" ) path = \"/export?session_id=%s&task_id=%s&ref=%s\" % ( session_ref, task_ref, vm_ref ) conn =", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "in self.connection.xenapi.host.get_all_records().values(): hosts.append({'id': h[\"uuid\"]}) return hosts def host_info(self, host_id): host_ref = self.connection.xenapi.host.get_by_uuid(host_id) return", "total_memory / (1024 * 1024), pool_rec[\"uuid\"], master_rec[\"address\"], { 'version': master_rec.get('software_version', {}).get('product_version') } )", "else: self.connection.xenapi.VIF.set_qos_algorithm_type(vif_ref, \"\") return self._network_interface_info(vif_ref) def network_interface_delete(self, guest_id, network_interface_id): vm_ref = self._vm_ref(guest_id) vif_ref", "snap = self.connection.xenapi.VM.get_record(snap_ref) self._delete_vm(snap[\"uuid\"]) self._delete_disks(vm_ref) self.connection.xenapi.VM.destroy(vm_ref) def _cd_ref(self, vm_ref): vm = self.connection.xenapi.VM.get_record(vm_ref) for", "in self.connection.xenapi.host.get_all(): met_ref = self.connection.xenapi.host.get_metrics(host_ref) m_rec = self.connection.xenapi.host_metrics.get_record(met_ref) total_memory += int(m_rec['memory_total']) pool_rec =", "{}, \"xenstore_data\": {}, \"sm_config\": {}, \"tags\": [] }) if data.get(\"storage_id\"): raise FeatureNotImplemented() else:", "applicable law or agreed to in writing, software # distributed under the License", "= { \"VM\": vm_ref, \"device\": str(next_device), \"MAC_autogenerated\": True, \"MAC\": \"\", \"MTU\": \"0\", \"other_config\":", "1024 * 1024), vm[\"PV_args\"], tools_up_to_date, ip, self.state_translation[vm.get('power_state')], host ) ) def _disk_info(self, disk_rec):", "vm_ref, \"best-effort\" ) else: self.connection.xenapi.VM.set_ha_restart_priority(vm_ref, \"\") if \"template\" in guestdata: is_template = self.connection.xenapi.VM.get_is_a_template(vm_ref)", "in data: vdi_rec[\"name_label\"] 
= data[\"name\"] vdi_rec[\"name_description\"] = data[\"name\"] vdi_ref = self.connection.xenapi.VDI.create(vdi_rec) vbd_rec[\"VDI\"] =", "vif_record[\"network\"] = self._network_ref(data[\"network\"]) vif_ref = self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref) except: pass return self._network_interface_info(vif_ref) def", "self.connection.xenapi.VM.set_name_label(vm_ref, guestdata[\"name\"]) if \"memory\" in guestdata: memory = guestdata[\"memory\"] if not isinstance(memory,dict): memory", "* 1024) ) ) def _vm_info(self, vm_ref): vm = self.connection.xenapi.VM.get_record(vm_ref) tools_up_to_date = None", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "\"other_config\": {}, \"xenstore_data\": {}, \"sm_config\": {}, \"tags\": [] }) if data.get(\"storage_id\"): raise FeatureNotImplemented()", "def connect(self): self.connection = XenAPI.Session( \"https://%s/\" % self.poolinfo.get(\"api_server\") ) try: self.connection.xenapi.login_with_password( self.poolinfo.get(\"username\"), self.poolinfo.get(\"password\")", "return self._snapshot_info(snap) def snapshot_revert(self, guest_id, snapshot_id): self.connection.xenapi.VM.revert(self._vm_ref(snapshot_id)) def snapshot_delete(self, guest_id, snapshot_id): self._delete_vm(snapshot_id) def", "= self._network_create(name, description, other_config) pif_ref = self._network_get_pifs(from_network) ref = self.connection.xenapi.pool.create_VLAN_from_PIF(pif_ref[0], net_ref, str(vlan)) return", "(1024 * 1024 * 1024) ) ) def _vm_info(self, vm_ref): vm = self.connection.xenapi.VM.get_record(vm_ref)", "self._vm_info(vm_ref) def guest_export(self, guest_id): vm_ref = self._vm_ref(guest_id) session_ref = self.connection._session # FIXME: get", "= [ self._snapshot_info(s) for s in self.connection.xenapi.VM.get_snapshots( self._vm_ref(guest_id) ) ] return snaps def", "self._vm_ref(guest_id) session_ref = self.connection._session # FIXME: get real master master = 
self.poolinfo.get(\"api_server\") task_ref", ") except Exception, error: # If host is slave, connect to master if", "if vif_rec[\"MAC\"] == network_interface_id: return vif_ref entity_info = \"%s - on Guest\" %", "self.connection.xenapi.VM.get_snapshots(vm_ref): snap = self.connection.xenapi.VM.get_record(snap_ref) self._delete_vm(snap[\"uuid\"]) self._delete_disks(vm_ref) self.connection.xenapi.VM.destroy(vm_ref) def _cd_ref(self, vm_ref): vm = self.connection.xenapi.VM.get_record(vm_ref)", "real master master = self.poolinfo.get(\"api_server\") task_ref = self.connection.xenapi.task.create( \"export vm %s\" % guest_id,", "hosts = [] for h in self.connection.xenapi.host.get_all_records().values(): hosts.append({'id': h[\"uuid\"]}) return hosts def host_info(self,", "writing, software # distributed under the License is distributed on an \"AS IS\"", "vm.get('is_a_snapshot')) and (not vm.get('is_a_template')): guests.append({'id': vm.get('uuid')}) return guests def guest_info(self, guest_id): vm =", "media_data): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) if media_data.get(\"name\") and media_data[\"name\"] != \"\":", "def snapshot_info(self, guest_id, snapshot_id): snap = self._vm_ref(snapshot_id) return self._snapshot_info(snap) def snapshot_revert(self, guest_id, snapshot_id):", "= self._network_get_pifs(from_network) ref = self.connection.xenapi.pool.create_VLAN_from_PIF(pif_ref[0], net_ref, str(vlan)) return net_ref def network_interface_list(self, guest_id): vm_ref", "self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"] == \"Disk\": vdi = self.connection.xenapi.VDI.get_record(vbd['VDI']) vdi['userdevice'] = vbd['userdevice'] vdi['ref'] =", "return self.connection.xenapi.VM.hard_reboot(vm_ref) else: return self.connection.xenapi.VM.clean_reboot(vm_ref) def guest_suspend(self, guest_id): return self.connection.xenapi.VM.suspend(self._vm_ref(guest_id)) def guest_resume(self, guest_id):", "\"vcpus_max\" : 
self.connection.xenapi.VM.get_VCPUs_max(vm_ref) } vcpus_at_startup = str(vcpus[\"vcpus_at_startup\"]) vcpus_max = str(vcpus[\"vcpus_max\"]) if int(vcpus_at_startup) >", "null_ref: self.connection.xenapi.VBD.eject(cd_ref) def media_info(self, guest_id): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) iso_ref =", "def _cd_ref(self, vm_ref): vm = self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref in vm['VBDs']: vbd = self.connection.xenapi.VBD.get_record(vbd_ref)", "compliance with the License. # You may obtain a copy of the License", "self.connection.xenapi.VIF.set_qos_algorithm_params( vif_ref, {\"kbps\": str(rate)} ) else: self.connection.xenapi.VIF.set_qos_algorithm_type(vif_ref, \"\") return self._network_interface_info(vif_ref) def network_interface_delete(self, guest_id,", "for disk in self.get_disks(vm_ref): if disk[\"userdevice\"] == disk_id: return disk entity_info = \"%s", "self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref) except: pass return self._network_interface_info(vif_ref) def network_interface_info(self, guest_id, network_interface_id): vm_ref =", "self.connection.xenapi.VIF.plug(vif_ref) except: pass return self._network_interface_info(vif_ref) def network_interface_info(self, guest_id, network_interface_id): vm_ref = self._vm_ref(guest_id) vif_ref", "snapshot_id): self.connection.xenapi.VM.revert(self._vm_ref(snapshot_id)) def snapshot_delete(self, guest_id, snapshot_id): self._delete_vm(snapshot_id) def tag_list(self, guest_id): return self.connection.xenapi.VM.get_tags(self._vm_ref(guest_id)) def", "return self._vm_info(vm) def guest_update(self, guest_id, guestdata): vm_ref = self._vm_ref(guest_id) if \"name\" in guestdata:", "vm_ref = re.sub(r'<.*?>', \"\", task_rec[\"result\"]) self.connection.xenapi.task.destroy(task_ref) return self._vm_info(vm_ref) def guest_export(self, guest_id): vm_ref =", "simplestack.utils import XenAPI from simplestack.exceptions import 
FeatureNotImplemented, EntityNotFound from simplestack.hypervisors.base import SimpleStack from", "= self.connection.xenapi.VM.get_record(snapshot_ref) return( self.format_for.snapshot( snapshot.get('uuid'), snapshot.get('name_label') ) ) def _network_interface_info(self, vif_ref): vif_rec =", "return self._network_interface_info(vif_ref) def network_interface_update(self, guest_id, network_interface_id, data): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref,", "\"BIOS order\" ) if \"hdd\" in guestdata: disk = self.get_disks(vm_ref)[-1] disks_size = self.get_disks_size(vm_ref)", "= 'OpaqueRef:NULL' if self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] != null_ref: self.connection.xenapi.VBD.eject(cd_ref) def media_info(self, guest_id): vm_ref = self._vm_ref(guest_id)", "poolinfo): self.connection = False self.poolinfo = poolinfo self.format_for = Formatter() self.connect() def connect(self):", "self.poolinfo.get(\"username\"), self.poolinfo.get(\"password\") ) except Exception, error: # If host is slave, connect to", "self._vm_ref(guest_id) self.connection.xenapi.VM.add_tags(vm_ref, tag_name) return self.tag_list(guest_id) def tag_delete(self, guest_id, tag_name): vm_ref = self._vm_ref(guest_id) self.connection.xenapi.VM.remove_tags(vm_ref,", "and vif_record[\"ipv4_allowed\"] != data[\"ipv4_allowed\"]: new_attributes[\"ipv4_allowed\"] = data[\"ipv4_allowed\"] if \"ipv6_allowed\" in data and vif_record[\"ipv6_allowed\"]", "quiet console=hvc0\" else: pv_args = guestdata[\"paravirtualized\"] self.connection.xenapi.VM.set_HVM_boot_policy(vm_ref, \"\") self.connection.xenapi.VM.set_PV_args(vm_ref, pv_args) else: self.connection.xenapi.VM.set_PV_args(vm_ref, \"\")", "vlan, other_config={}): net_ref = self._network_create(name, description, other_config) pif_ref = self._network_get_pifs(from_network) ref = self.connection.xenapi.pool.create_VLAN_from_PIF(pif_ref[0],", "\"\") self.connection.xenapi.VM.set_HVM_boot_params( vm_ref, 
{\"order\": \"dc\"} ) self.connection.xenapi.VM.set_HVM_boot_policy( vm_ref, \"BIOS order\" ) if \"hdd\"", "License. # # @author: <NAME>, Locaweb. # @author: <NAME> (morellon), Locaweb. # @author:", "[self._disk_info(d) for d in disks] def disk_create(self, guest_id, data): vm_ref = self._vm_ref(guest_id) devices", "\"other_config\": self.connection.xenapi.network.get_other_config(net_ref)} def _network_ref(self, name): net_ref = self.connection.xenapi.network.get_by_name_label(name) if len(net_ref) == 0: raise", "guest_metrics[\"networks\"][\"0/ip\"] host = None if vm[\"resident_on\"] != \"OpaqueRef:NULL\": host = self.connection.xenapi.host.get_name_label( vm[\"resident_on\"] )", "\"name\" in data: vdi_rec[\"name_label\"] = data[\"name\"] vdi_rec[\"name_description\"] = data[\"name\"] vdi_ref = self.connection.xenapi.VDI.create(vdi_rec) vbd_rec[\"VDI\"]", "self._network_ref(name) return self.connection.xenapi.network.get_PIFs(ref) def _network_create(self, name, description, other_config={}): return self.connection.xenapi.network.create({\"name_label\": name, \"name_description\": description,", "return self._disk_info(disk_rec) def disk_info(self, guest_id, disk_id): vm_ref = self._vm_ref(guest_id) disk_rec = self._disk_rec(vm_ref, disk_id)", "record in storages.iteritems(): free_space = ( int(record[\"physical_size\"]) - int(record[\"virtual_allocation\"]) ) if free_space >", "= None if vm[\"resident_on\"] != \"OpaqueRef:NULL\": host = self.connection.xenapi.host.get_name_label( vm[\"resident_on\"] ) return( self.format_for.guest(", "vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass self.connection.xenapi.VIF.destroy(vif_ref) def", "= self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref in vm['VBDs']: vbd = self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"] == \"CD\":", "def guest_resume(self, guest_id): return 
self.connection.xenapi.VM.resume( self._vm_ref(guest_id), False, False ) def guest_clone(self, guest_id, data):", "task_rec = self.connection.xenapi.task.get_record(task_ref) vm_ref = re.sub(r'<.*?>', \"\", task_rec[\"result\"]) self.connection.xenapi.task.destroy(task_ref) return self._vm_info(vm_ref) def guest_export(self,", "def pool_info(self): used_memory = 0 for vm_rec in self.connection.xenapi.VM.get_all_records().values(): if not vm_rec['is_a_template'] and", "tools_up_to_date = None ip = None if vm[\"guest_metrics\"] != \"OpaqueRef:NULL\": guest_metrics = self.connection.xenapi.VM_guest_metrics.\\", "= str(int(guestdata[\"memory_target_live\"])<<20) self.connection.xenapi.VM.set_memory_dynamic_range( vm_ref, memory_target, memory_target ) if \"cpus\" in guestdata: vcpus =", "entity_info) def _vm_ref(self, uuid): try: return self.connection.xenapi.VM.get_by_uuid(uuid) except: LOG.warning(\"uuid=%s action=not_found\" % uuid) return", "ip = None if vm[\"guest_metrics\"] != \"OpaqueRef:NULL\": guest_metrics = self.connection.xenapi.VM_guest_metrics.\\ get_record(vm[\"guest_metrics\"]) tools_up_to_date =", "self._vm_ref(guest_id), False, False ) def guest_reboot(self, guest_id, force=False): vm_ref = self._vm_ref(guest_id) if force:", "return {\"name\": name} def network_list(self): net_refs = self.connection.xenapi.network.get_all() ret = [] for net", "data: self.connection.xenapi.VDI.set_name_label( disk_rec[\"ref\"], data[\"name\"] ) self.connection.xenapi.VDI.set_name_description( disk_rec[\"ref\"], data[\"name\"] ) if \"size\" in data:", "self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) iso_ref = self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] if iso_ref == 'OpaqueRef:NULL': return {\"name\":", "\"/import?session_id=%s&task_id=%s&sr_id=%s\" % ( session_ref, task_ref, storage_ref ) try: conn = httplib.HTTPConnection(master) conn.request( \"PUT\",", ") else: self.connection.xenapi.VM.set_ha_restart_priority(vm_ref, \"\") if \"template\" in 
guestdata: is_template = self.connection.xenapi.VM.get_is_a_template(vm_ref) if guestdata[\"template\"]", "entity_info = \"%s - on Guest\" % (disk_id) raise EntityNotFound(\"Disk\", entity_info) def _network_interface_ref(self,", "disk_id) if \"name\" in data: self.connection.xenapi.VDI.set_name_label( disk_rec[\"ref\"], data[\"name\"] ) self.connection.xenapi.VDI.set_name_description( disk_rec[\"ref\"], data[\"name\"] )", "(the \"License\"); # you may not use this file except in compliance with", "re import errno import socket import httplib import logging LOG = logging.getLogger('simplestack.hypervisors.xen') class", "in self.get_disks(vm_ref): if disk[\"userdevice\"] == disk_id: return disk entity_info = \"%s - on", "# Unless required by applicable law or agreed to in writing, software #", "None} vif_rec.update(self.connection.xenapi.VIF.get_record(vif_ref)) network_rec = self.connection.xenapi.network.get_record( vif_rec[\"network\"] ) return( self.format_for.network_interface( vif_rec[\"MAC\"], vif_rec[\"device\"], vif_rec[\"MAC\"], network_rec[\"name_label\"],", "disk_rec.get('userdevice'), int(disk_rec.get('virtual_size')) / (1024 * 1024 * 1024), disk_rec.get(\"uuid\") ) ) def _snapshot_info(self,", "if len(new_attributes) != 0: vif_record.update(new_attributes) try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass self.connection.xenapi.VIF.destroy(vif_ref) vif_ref = self.connection.xenapi.VIF.create(vif_record)", "by applicable law or agreed to in writing, software # distributed under the", "self._network_create(name, description, other_config) pif_ref = self._network_get_pifs(from_network) ref = self.connection.xenapi.pool.create_VLAN_from_PIF(pif_ref[0], net_ref, str(vlan)) return net_ref", "= self._vm_ref(guest_id) disk_rec = self._disk_rec(vm_ref, disk_id) if \"name\" in data: self.connection.xenapi.VDI.set_name_label( disk_rec[\"ref\"], data[\"name\"]", "raise task_rec = self.connection.xenapi.task.get_record(task_ref) vm_ref = re.sub(r'<.*?>', 
\"\", task_rec[\"result\"]) self.connection.xenapi.task.destroy(task_ref) return self._vm_info(vm_ref) def", "% (disk_id) raise EntityNotFound(\"Disk\", entity_info) def _network_interface_ref(self, vm_ref, network_interface_id): vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref) for", "= None if vm[\"guest_metrics\"] != \"OpaqueRef:NULL\": guest_metrics = self.connection.xenapi.VM_guest_metrics.\\ get_record(vm[\"guest_metrics\"]) tools_up_to_date = guest_metrics[\"PV_drivers_up_to_date\"]", "def snapshot_delete(self, guest_id, snapshot_id): self._delete_vm(snapshot_id) def tag_list(self, guest_id): return self.connection.xenapi.VM.get_tags(self._vm_ref(guest_id)) def tag_create(self, guest_id,", "= self.connection.xenapi.VIF.get_record(vif_ref) if vif_rec[\"MAC\"] == network_interface_id: return vif_ref entity_info = \"%s - on", "in guestdata: parameters = self.connection.xenapi.VM.get_VCPUs_params(vm_ref) parameters.update(guestdata[\"vcpu_settings\"]) self.connection.xenapi.VM.set_VCPUs_params(vm_ref, parameters) if \"ha_enabled\" in guestdata: if", "vif_record.update(new_attributes) try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass self.connection.xenapi.VIF.destroy(vif_ref) vif_ref = self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref) except: pass", "= self.get_disks(vm_ref) vdi_rec[\"SR\"] = disks[0][\"SR\"] if \"name\" in data: vdi_rec[\"name_label\"] = data[\"name\"] vdi_rec[\"name_description\"]", "vif_ref): vif_rec = {'locking_mode': None, 'ipv4_allowed': None, 'ipv6_allowed': None} vif_rec.update(self.connection.xenapi.VIF.get_record(vif_ref)) network_rec = self.connection.xenapi.network.get_record(", ": memory } memory_target = str(int(memory[\"memory_target\"])<<20) memory_static_min = str(int(memory[\"memory_static_min\"])<<20) memory_static_max = str(int(memory[\"memory_static_max\"])<<20) self.connection.xenapi.VM.set_memory_limits(", "file except in compliance with the License. 
# You may obtain a copy", "= max(devices) + 1 for device in range(next_device): if device not in devices:", "data[\"active\"]: try: self.connection.xenapi.VIF.plug(vif_ref) except: pass else: try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass if \"ratelimit\" in", "return( self.format_for.disk( disk_rec.get('userdevice'), disk_rec.get('name_label'), disk_rec.get('userdevice'), int(disk_rec.get('virtual_size')) / (1024 * 1024 * 1024), disk_rec.get(\"uuid\")", "self.connection.xenapi.VM.get_record(snapshot_ref) return( self.format_for.snapshot( snapshot.get('uuid'), snapshot.get('name_label') ) ) def _network_interface_info(self, vif_ref): vif_rec = {'locking_mode':", "= [] for net in net_refs: ret.append({\"id\": net}) return ret def network_info(self, net_ref):", "in vif_refs: vif_rec = self.connection.xenapi.VIF.get_record(vif_ref) if vif_rec[\"MAC\"] == network_interface_id: return vif_ref entity_info =", "self.connection.xenapi.VDI.set_name_description( disk_rec[\"ref\"], data[\"name\"] ) if \"size\" in data: new_disk_size = int(data[\"size\"]) new_disk_size *=", "vif_rec = self.connection.xenapi.VIF.get_record(vif_ref) if vif_rec[\"MAC\"] == network_interface_id: return vif_ref entity_info = \"%s -", "= self._network_ref(data[\"network\"]) if vif_record[\"network\"] != net_refs: new_attributes[\"network\"] = net_refs if \"locking_mode\" in data", "self.connection.xenapi.VM.set_is_a_template( vm_ref, guestdata[\"template\"] ) if \"paravirtualized\" in guestdata: if guestdata[\"paravirtualized\"]: if guestdata[\"paravirtualized\"] is", "next_device = device break vbd_rec = { \"VM\": vm_ref, \"userdevice\": str(next_device), \"bootable\": False,", "vm_ref: return for snap_ref in self.connection.xenapi.VM.get_snapshots(vm_ref): snap = self.connection.xenapi.VM.get_record(snap_ref) self._delete_vm(snap[\"uuid\"]) self._delete_disks(vm_ref) self.connection.xenapi.VM.destroy(vm_ref) def", "disk_id) return self._disk_info(disk_rec) def media_mount(self, 
guest_id, media_data): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref)", "logout(self): self.connection.xenapi.session.logout() def pool_info(self): used_memory = 0 for vm_rec in self.connection.xenapi.VM.get_all_records().values(): if not", "in guestdata: memory = guestdata[\"memory\"] if not isinstance(memory,dict): memory = { \"memory_target\" :", "int(vm_rec['memory_dynamic_max']) total_memory = 0 for host_ref in self.connection.xenapi.host.get_all(): met_ref = self.connection.xenapi.host.get_metrics(host_ref) m_rec =", "not in devices: next_device = device break vif_record = { \"VM\": vm_ref, \"device\":", "data[\"name\"] vdi_ref = self.connection.xenapi.VDI.create(vdi_rec) vbd_rec[\"VDI\"] = vdi_ref self.connection.xenapi.VBD.create(vbd_rec) disk_rec = self._disk_rec(vm_ref, next_device) return", "name = self.connection.xenapi.VDI.get_record(iso_ref)[\"name_label\"] return {\"name\": name} def network_list(self): net_refs = self.connection.xenapi.network.get_all() ret =", "{}, \"qos_algorithm_type\": \"\", \"qos_algorithm_params\": {} } if \"network\" in data: vif_record[\"network\"] = self._network_ref(data[\"network\"])", "str(vlan)) return net_ref def network_interface_list(self, guest_id): vm_ref = self._vm_ref(guest_id) vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref) return", "guest_id, network_interface_id): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) return self._network_interface_info(vif_ref) def network_interface_update(self,", "self.poolinfo[\"api_server\"] = str(error).split(\"'\")[3] self.connect() else: raise error def logout(self): self.connection.xenapi.session.logout() def pool_info(self): used_memory", "disk_list(self, guest_id): vm_ref = self._vm_ref(guest_id) disks = self.get_disks(vm_ref) return [self._disk_info(d) for d in", "def _storage_info(self, sr_ref): sr = self.connection.xenapi.SR.get_record(sr_ref) return( self.format_for.storage( sr['uuid'], 
sr['name_label'], sr['type'], int(sr['physical_utilisation']) /", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "= data[\"ipv6_allowed\"] if len(new_attributes) != 0: vif_record.update(new_attributes) try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass self.connection.xenapi.VIF.destroy(vif_ref) vif_ref", "job\" ) path = \"/import?session_id=%s&task_id=%s&sr_id=%s\" % ( session_ref, task_ref, storage_ref ) try: conn", "if data[\"active\"]: try: self.connection.xenapi.VIF.plug(vif_ref) except: pass else: try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass if \"ratelimit\"", "= vbd['userdevice'] vdi['ref'] = vbd['VDI'] disks.append(vdi) return sorted(disks, key=lambda vdi: int(vdi['userdevice'])) def get_disks_size(self,", "parameters.update(guestdata[\"vcpu_settings\"]) self.connection.xenapi.VM.set_VCPUs_params(vm_ref, parameters) if \"ha_enabled\" in guestdata: if guestdata[\"ha_enabled\"]: self.connection.xenapi.VM.set_ha_restart_priority( vm_ref, \"best-effort\" )", "= self.connection._session # FIXME: get real master master = self.poolinfo.get(\"api_server\") task_ref = self.connection.xenapi.task.create(", "in storages.iteritems(): free_space = ( int(record[\"physical_size\"]) - int(record[\"virtual_allocation\"]) ) if free_space > max_free_space:", "self.connection.xenapi.VM.clean_reboot(vm_ref) def guest_suspend(self, guest_id): return self.connection.xenapi.VM.suspend(self._vm_ref(guest_id)) def guest_resume(self, guest_id): return self.connection.xenapi.VM.resume( self._vm_ref(guest_id), False,", "= self._network_ref(name) return self.connection.xenapi.network.get_PIFs(ref) def _network_create(self, name, description, other_config={}): return self.connection.xenapi.network.create({\"name_label\": name, \"name_description\":", "str(int(memory[\"memory_static_max\"])<<20) self.connection.xenapi.VM.set_memory_limits( vm_ref, 
memory_static_min, memory_static_max, memory_target, memory_target ) if \"memory_target_live\" in guestdata: memory_target", "= sr_ref if vm_size and vm_size > 0 and vm_size > max_free_space: raise", "pass if \"active\" in data: if data[\"active\"]: try: self.connection.xenapi.VIF.plug(vif_ref) except: pass else: try:", ") return self._snapshot_info(snap) def snapshot_info(self, guest_id, snapshot_id): snap = self._vm_ref(snapshot_id) return self._snapshot_info(snap) def", "return self.connection.xenapi.VM.hard_shutdown( self._vm_ref(guest_id) ) else: return self.connection.xenapi.VM.clean_shutdown( self._vm_ref(guest_id) ) def guest_start(self, guest_id): return", "self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref in vm['VBDs']: vbd = self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"] == \"Disk\": vdi", ", \"memory_static_min\" : memory, \"memory_static_max\" : memory } memory_target = str(int(memory[\"memory_target\"])<<20) memory_static_min =", "device break vbd_rec = { \"VM\": vm_ref, \"userdevice\": str(next_device), \"bootable\": False, \"mode\": \"RW\",", "action=import message='BUG?'\") else: raise task_rec = self.connection.xenapi.task.get_record(task_ref) vm_ref = re.sub(r'<.*?>', \"\", task_rec[\"result\"]) self.connection.xenapi.task.destroy(task_ref)", "guest_id): vm_ref = self._vm_ref(guest_id) disks = self.get_disks(vm_ref) return [self._disk_info(d) for d in disks]", "\"read_only\": False, \"other_config\": {}, \"xenstore_data\": {}, \"sm_config\": {}, \"tags\": [] }) if data.get(\"storage_id\"):", "\"PUT\", path, vm_stream, {\"Content-Length\": vm_size} ) response = conn.getresponse() response.status response.read() except socket.error", "self._disk_info(disk_rec) def disk_update(self, guest_id, disk_id, data): vm_ref = self._vm_ref(guest_id) disk_rec = self._disk_rec(vm_ref, disk_id)", "if \"name\" in guestdata: self.connection.xenapi.VM.set_name_label(vm_ref, guestdata[\"name\"]) if \"memory\" in guestdata: memory = 
guestdata[\"memory\"]", "Locaweb. # All Rights Reserved. # # Licensed under the Apache License, Version", "> 0 and vm_size > max_free_space: raise Exception(\"No storage space left for importing\")", "disk_id, data): vm_ref = self._vm_ref(guest_id) disk_rec = self._disk_rec(vm_ref, disk_id) if \"name\" in data:", "/ (1024 * 1024), total_memory / (1024 * 1024), pool_rec[\"uuid\"], master_rec[\"address\"], { 'version':", "vif_rec = {'locking_mode': None, 'ipv4_allowed': None, 'ipv6_allowed': None} vif_rec.update(self.connection.xenapi.VIF.get_record(vif_ref)) network_rec = self.connection.xenapi.network.get_record( vif_rec[\"network\"]", "self.format_for.disk( disk_rec.get('userdevice'), disk_rec.get('name_label'), disk_rec.get('userdevice'), int(disk_rec.get('virtual_size')) / (1024 * 1024 * 1024), disk_rec.get(\"uuid\") )", "\"other_config\": {}, \"qos_algorithm_type\": \"\", \"qos_algorithm_params\": {} } vdi_rec = ({ \"name_label\": \"New Disk\",", "import Formatter import re import errno import socket import httplib import logging LOG", "_network_get_pifs(self, name): ref = self._network_ref(name) return self.connection.xenapi.network.get_PIFs(ref) def _network_create(self, name, description, other_config={}): return", "* 1024 * 1024), vm[\"PV_args\"], tools_up_to_date, ip, self.state_translation[vm.get('power_state')], host ) ) def _disk_info(self,", "memory_static_max, memory_target, memory_target ) if \"memory_target_live\" in guestdata: memory_target = str(int(guestdata[\"memory_target_live\"])<<20) self.connection.xenapi.VM.set_memory_dynamic_range( vm_ref,", "guestdata[\"paravirtualized\"] is True: pv_args = \"-- quiet console=hvc0\" else: pv_args = guestdata[\"paravirtualized\"] self.connection.xenapi.VM.set_HVM_boot_policy(vm_ref,", "\"ratelimit\" ) self.connection.xenapi.VIF.set_qos_algorithm_params( vif_ref, {\"kbps\": str(rate)} ) else: self.connection.xenapi.VIF.set_qos_algorithm_type(vif_ref, \"\") return self._network_interface_info(vif_ref) def", 
"ret = [] for net in net_refs: ret.append({\"id\": net}) return ret def network_info(self,", "snapshot_id): snap = self._vm_ref(snapshot_id) return self._snapshot_info(snap) def snapshot_revert(self, guest_id, snapshot_id): self.connection.xenapi.VM.revert(self._vm_ref(snapshot_id)) def snapshot_delete(self,", "vbd = self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"] == \"Disk\": vdi = self.connection.xenapi.VDI.get_record(vbd['VDI']) vdi['userdevice'] = vbd['userdevice']", "1024), vm[\"PV_args\"], tools_up_to_date, ip, self.state_translation[vm.get('power_state')], host ) ) def _disk_info(self, disk_rec): return( self.format_for.disk(", "str(next_device), \"bootable\": False, \"mode\": \"RW\", \"type\": \"Disk\", \"unpluggable\": False, \"empty\": False, \"other_config\": {},", "self._vm_info(vm) def guest_update(self, guest_id, guestdata): vm_ref = self._vm_ref(guest_id) if \"name\" in guestdata: self.connection.xenapi.VM.set_name_label(vm_ref,", "0 and vm_size > max_free_space: raise Exception(\"No storage space left for importing\") task_ref", "= self._network_interface_ref(vm_ref, network_interface_id) try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass self.connection.xenapi.VIF.destroy(vif_ref) def snapshot_list(self, guest_id): snaps =", "self._snapshot_info(s) for s in self.connection.xenapi.VM.get_snapshots( self._vm_ref(guest_id) ) ] return snaps def snapshot_create(self, guest_id,", "vm['VBDs']: vbd = self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"] == \"Disk\": vdi = self.connection.xenapi.VDI.get_record(vbd['VDI']) vdi['userdevice'] =", "None def _host_info(self, host_ref): host = self.connection.xenapi.host.get_record(host_ref) return( self.format_for.host( host['uuid'], host['name_label'], host['address'] )", "str(int(guestdata[\"memory_target_live\"])<<20) self.connection.xenapi.VM.set_memory_dynamic_range( vm_ref, memory_target, memory_target ) if \"cpus\" in guestdata: vcpus = guestdata[\"cpus\"]", 
"network_list(self): net_refs = self.connection.xenapi.network.get_all() ret = [] for net in net_refs: ret.append({\"id\": net})", "= str(vcpus[\"vcpus_at_startup\"]) vcpus_max = str(vcpus[\"vcpus_max\"]) if int(vcpus_at_startup) > int(vcpus_max): self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_at_startup) else: self.connection.xenapi.VM.set_VCPUs_max(vm_ref,", "False, \"other_config\": {}, \"qos_algorithm_type\": \"\", \"qos_algorithm_params\": {} } vdi_rec = ({ \"name_label\": \"New", "self.connection.xenapi.network.get_PIFs(ref) def _network_create(self, name, description, other_config={}): return self.connection.xenapi.network.create({\"name_label\": name, \"name_description\": description, \"other_config\": other_config})", "\"-- quiet console=hvc0\" else: pv_args = guestdata[\"paravirtualized\"] self.connection.xenapi.VM.set_HVM_boot_policy(vm_ref, \"\") self.connection.xenapi.VM.set_PV_args(vm_ref, pv_args) else: self.connection.xenapi.VM.set_PV_args(vm_ref,", ") def _snapshot_info(self, snapshot_ref): snapshot = self.connection.xenapi.VM.get_record(snapshot_ref) return( self.format_for.snapshot( snapshot.get('uuid'), snapshot.get('name_label') ) )", "data: if data[\"ratelimit\"]: # kbps in xen is actually kBps rate = data[\"ratelimit\"]", "def guest_clone(self, guest_id, data): vm = self.connection.xenapi.VM.clone( self._vm_ref(guest_id), data[\"name\"] ) return self._vm_info(vm) def", ") ) def _storage_info(self, sr_ref): sr = self.connection.xenapi.SR.get_record(sr_ref) return( self.format_for.storage( sr['uuid'], sr['name_label'], sr['type'],", "raise error def logout(self): self.connection.xenapi.session.logout() def pool_info(self): used_memory = 0 for vm_rec in", "hosts.append({'id': h[\"uuid\"]}) return hosts def host_info(self, host_id): host_ref = self.connection.xenapi.host.get_by_uuid(host_id) return self._host_info(host_ref) def", "hosts def host_info(self, host_id): host_ref = self.connection.xenapi.host.get_by_uuid(host_id) return 
self._host_info(host_ref) def storage_list(self): storages =", "\"memory_target_live\" in guestdata: memory_target = str(int(guestdata[\"memory_target_live\"])<<20) self.connection.xenapi.VM.set_memory_dynamic_range( vm_ref, memory_target, memory_target ) if \"cpus\"", "in self.connection.xenapi.VM.get_VIFs(vm_ref): devices.append(int(self.connection.xenapi.VIF.get_device(vif))) next_device = max(devices) + 1 for device in range(next_device): if", "Data should contain at least a network key: {\"network\": \"THE NETWORK NAME\"} \"\"\"", "self.connection.xenapi.VM.get_snapshots( self._vm_ref(guest_id) ) ] return snaps def snapshot_create(self, guest_id, snapshot_name=None): if not snapshot_name:", "if not vm_rec['is_a_template'] and not vm_rec['is_a_snapshot']: used_memory += int(vm_rec['memory_dynamic_max']) total_memory = 0 for", "str(data[\"size\"] * 1024 * 1024 * 1024), \"type\": \"system\", \"sharable\": False, \"read_only\": False,", "vbd_ref in vm['VBDs']: vbd = self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"] == \"Disk\": vdi = self.connection.xenapi.VDI.get_record(vbd['VDI'])", "conn.request( \"PUT\", path, vm_stream, {\"Content-Length\": vm_size} ) response = conn.getresponse() response.status response.read() except", "( self.format_for.pool( used_memory / (1024 * 1024), total_memory / (1024 * 1024), pool_rec[\"uuid\"],", "vbd_rec[\"VDI\"] = vdi_ref self.connection.xenapi.VBD.create(vbd_rec) disk_rec = self._disk_rec(vm_ref, next_device) return self._disk_info(disk_rec) def disk_info(self, guest_id,", "guest_update(self, guest_id, guestdata): vm_ref = self._vm_ref(guest_id) if \"name\" in guestdata: self.connection.xenapi.VM.set_name_label(vm_ref, guestdata[\"name\"]) if", "= self.connection.xenapi.host.get_metrics(host_ref) m_rec = self.connection.xenapi.host_metrics.get_record(met_ref) total_memory += int(m_rec['memory_total']) pool_rec = self.connection.xenapi.pool.get_all_records().values()[0] master_rec =", "guestdata[\"memory\"] if not 
isinstance(memory,dict): memory = { \"memory_target\" : memory , \"memory_static_min\" :", "= self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) if media_data.get(\"name\") and media_data[\"name\"] != \"\": self.media_unmount(guest_id) iso_ref", "try: conn = httplib.HTTPConnection(master) conn.request( \"PUT\", path, vm_stream, {\"Content-Length\": vm_size} ) response =", "and vm_size > max_free_space: raise Exception(\"No storage space left for importing\") task_ref =", "(1024 * 1024 * 1024), int(sr['physical_size']) / (1024 * 1024 * 1024) )", "= ( int(record[\"physical_size\"]) - int(record[\"virtual_allocation\"]) ) if free_space > max_free_space: max_free_space = free_space", "False, False ) def guest_clone(self, guest_id, data): vm = self.connection.xenapi.VM.clone( self._vm_ref(guest_id), data[\"name\"] )", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "Guest\" % (disk_id) raise EntityNotFound(\"Disk\", entity_info) def _network_interface_ref(self, vm_ref, network_interface_id): vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref)", "None if vm[\"resident_on\"] != \"OpaqueRef:NULL\": host = self.connection.xenapi.host.get_name_label( vm[\"resident_on\"] ) return( self.format_for.guest( vm.get('uuid'),", "self.connection.xenapi.network.get_name_description(net_ref), \"other_config\": self.connection.xenapi.network.get_other_config(net_ref)} def _network_ref(self, name): net_ref = self.connection.xenapi.network.get_by_name_label(name) if len(net_ref) == 0:", "new_attributes[\"ipv6_allowed\"] = data[\"ipv6_allowed\"] if len(new_attributes) != 0: vif_record.update(new_attributes) try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass self.connection.xenapi.VIF.destroy(vif_ref)", "vm_rec['is_a_snapshot']: used_memory += int(vm_rec['memory_dynamic_max']) total_memory = 0 for host_ref in self.connection.xenapi.host.get_all(): met_ref =", ") def host_list(self): hosts = [] for h in 
self.connection.xenapi.host.get_all_records().values(): hosts.append({'id': h[\"uuid\"]}) return", "- on Guest\" % (network_interface_id) raise EntityNotFound(\"NetworkInterface\", entity_info) def _vm_ref(self, uuid): try: return", "self.connection.xenapi.VDI.create(vdi_rec) vbd_rec[\"VDI\"] = vdi_ref self.connection.xenapi.VBD.create(vbd_rec) disk_rec = self._disk_rec(vm_ref, next_device) return self._disk_info(disk_rec) def disk_info(self,", "= str(datetime.datetime.now()) snap = self.connection.xenapi.VM.snapshot( self._vm_ref(guest_id), snapshot_name ) return self._snapshot_info(snap) def snapshot_info(self, guest_id,", "used_memory = 0 for vm_rec in self.connection.xenapi.VM.get_all_records().values(): if not vm_rec['is_a_template'] and not vm_rec['is_a_snapshot']:", "(1024 * 1024 * 1024), vm[\"PV_args\"], tools_up_to_date, ip, self.state_translation[vm.get('power_state')], host ) ) def", "host is slave, connect to master if 'HOST_IS_SLAVE' in str(error): self.poolinfo[\"api_server\"] = str(error).split(\"'\")[3]", "guestdata: self.connection.xenapi.VM.set_name_label(vm_ref, guestdata[\"name\"]) if \"memory\" in guestdata: memory = guestdata[\"memory\"] if not isinstance(memory,dict):", "= self._vm_ref(guest_id) if force: return self.connection.xenapi.VM.hard_reboot(vm_ref) else: return self.connection.xenapi.VM.clean_reboot(vm_ref) def guest_suspend(self, guest_id): return", "sr_ref if vm_size and vm_size > 0 and vm_size > max_free_space: raise Exception(\"No", "def disk_info(self, guest_id, disk_id): vm_ref = self._vm_ref(guest_id) disk_rec = self._disk_rec(vm_ref, disk_id) return self._disk_info(disk_rec)", "return {\"name\": None} else: name = self.connection.xenapi.VDI.get_record(iso_ref)[\"name_label\"] return {\"name\": name} def network_list(self): net_refs", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "LOG.warning(\"error=CONNRESET action=import message='BUG?'\") else: raise task_rec = 
self.connection.xenapi.task.get_record(task_ref) vm_ref = re.sub(r'<.*?>', \"\", task_rec[\"result\"])", "return self._vm_info(vm_ref) def guest_export(self, guest_id): vm_ref = self._vm_ref(guest_id) session_ref = self.connection._session # FIXME:", "import XenAPI from simplestack.exceptions import FeatureNotImplemented, EntityNotFound from simplestack.hypervisors.base import SimpleStack from simplestack.presenters.formatter", "path = \"/import?session_id=%s&task_id=%s&sr_id=%s\" % ( session_ref, task_ref, storage_ref ) try: conn = httplib.HTTPConnection(master)", "get real master master = self.poolinfo.get(\"api_server\") task_ref = self.connection.xenapi.task.create( \"export vm %s\" %", "str(next_device), \"MAC_autogenerated\": True, \"MAC\": \"\", \"MTU\": \"0\", \"other_config\": {}, \"qos_algorithm_type\": \"\", \"qos_algorithm_params\": {}", "disk_rec = self._disk_rec(vm_ref, next_device) return self._disk_info(disk_rec) def disk_info(self, guest_id, disk_id): vm_ref = self._vm_ref(guest_id)", "vm['VBDs']: vbd = self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"] == \"CD\": return vbd_ref def _delete_disks(self, vm_ref):", "\"STOPPED\", \"Suspended\": \"PAUSED\" } def __init__(self, poolinfo): self.connection = False self.poolinfo = poolinfo", "\"import vm\", \"import job\" ) path = \"/import?session_id=%s&task_id=%s&sr_id=%s\" % ( session_ref, task_ref, storage_ref", "cd_ref = self._cd_ref(vm_ref) null_ref = 'OpaqueRef:NULL' if self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] != null_ref: self.connection.xenapi.VBD.eject(cd_ref) def media_info(self,", "self.connection.xenapi.VDI.get_record(iso_ref)[\"name_label\"] return {\"name\": name} def network_list(self): net_refs = self.connection.xenapi.network.get_all() ret = [] for", "self._vm_ref(vm_id) if not vm_ref: return for snap_ref in self.connection.xenapi.VM.get_snapshots(vm_ref): snap = self.connection.xenapi.VM.get_record(snap_ref) self._delete_vm(snap[\"uuid\"])", "guest_id, 
media_data): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) if media_data.get(\"name\") and media_data[\"name\"] !=", "= self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref) except: pass return self._network_interface_info(vif_ref) def network_interface_info(self, guest_id, network_interface_id): vm_ref", "s in self.connection.xenapi.VM.get_snapshots( self._vm_ref(guest_id) ) ] return snaps def snapshot_create(self, guest_id, snapshot_name=None): if", "network_interface_id): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass self.connection.xenapi.VIF.destroy(vif_ref)", "self._disk_rec(vm_ref, disk_id) return self._disk_info(disk_rec) def media_mount(self, guest_id, media_data): vm_ref = self._vm_ref(guest_id) cd_ref =", ": vcpus, \"vcpus_max\" : self.connection.xenapi.VM.get_VCPUs_max(vm_ref) } vcpus_at_startup = str(vcpus[\"vcpus_at_startup\"]) vcpus_max = str(vcpus[\"vcpus_max\"]) if", "sr_ref, record in storages.iteritems(): free_space = ( int(record[\"physical_size\"]) - int(record[\"virtual_allocation\"]) ) if free_space", "% (network_interface_id) raise EntityNotFound(\"NetworkInterface\", entity_info) def _vm_ref(self, uuid): try: return self.connection.xenapi.VM.get_by_uuid(uuid) except: LOG.warning(\"uuid=%s", "storages.append({'id': sr[\"uuid\"]}) return storages def storage_info(self, storage_id): sr_ref = self.connection.xenapi.SR.get_by_uuid(storage_id) return self._storage_info(sr_ref) def", "session_ref, task_ref, storage_ref ) try: conn = httplib.HTTPConnection(master) conn.request( \"PUT\", path, vm_stream, {\"Content-Length\":", "def _network_create(self, name, description, other_config={}): return self.connection.xenapi.network.create({\"name_label\": name, \"name_description\": description, \"other_config\": other_config}) def", "\"import job\" ) path = 
\"/import?session_id=%s&task_id=%s&sr_id=%s\" % ( session_ref, task_ref, storage_ref ) try:", "the License for the specific language governing permissions and # limitations under the", "vm[\"resident_on\"] ) return( self.format_for.guest( vm.get('uuid'), vm.get('name_label'), int(vm.get('VCPUs_at_startup')), int(vm.get('memory_static_max')) / (1024 * 1024), self.get_disks_size(vm_ref)", "in guestdata: if guestdata[\"ha_enabled\"]: self.connection.xenapi.VM.set_ha_restart_priority( vm_ref, \"best-effort\" ) else: self.connection.xenapi.VM.set_ha_restart_priority(vm_ref, \"\") if \"template\"", "\"Unknown network: %s\" % name) return net_ref[0] def _network_get_pifs(self, name): ref = self._network_ref(name)", "disk_rec.get('name_label'), disk_rec.get('userdevice'), int(disk_rec.get('virtual_size')) / (1024 * 1024 * 1024), disk_rec.get(\"uuid\") ) ) def", "else: raise error def logout(self): self.connection.xenapi.session.logout() def pool_info(self): used_memory = 0 for vm_rec", "self.connection.xenapi.VM.get_VBDs(vm_ref): devices.append(int(self.connection.xenapi.VBD.get_userdevice(vbd))) next_device = max(devices) + 1 for device in range(next_device): if device", "def guest_start(self, guest_id): return self.connection.xenapi.VM.start( self._vm_ref(guest_id), False, False ) def guest_reboot(self, guest_id, force=False):", "def tag_delete(self, guest_id, tag_name): vm_ref = self._vm_ref(guest_id) self.connection.xenapi.VM.remove_tags(vm_ref, tag_name) def get_disks(self, vm_ref): disks", "iso_ref = self.connection.xenapi.VDI.get_by_name_label( media_data[\"name\"] )[0] self.connection.xenapi.VBD.insert(cd_ref, iso_ref) else: self.media_unmount(guest_id) def media_unmount(self, guest_id): vm_ref", "{} } if \"network\" in data: vif_record[\"network\"] = self._network_ref(data[\"network\"]) vif_ref = self.connection.xenapi.VIF.create(vif_record) try:", "\"qos_algorithm_params\": {} } if \"network\" in data: vif_record[\"network\"] = 
self._network_ref(data[\"network\"]) vif_ref = self.connection.xenapi.VIF.create(vif_record)", ") ) def _vm_info(self, vm_ref): vm = self.connection.xenapi.VM.get_record(vm_ref) tools_up_to_date = None ip =", "tag_name) def get_disks(self, vm_ref): disks = [] vm = self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref in", "self.connection.xenapi.host.get_all_records().values(): hosts.append({'id': h[\"uuid\"]}) return hosts def host_info(self, host_id): host_ref = self.connection.xenapi.host.get_by_uuid(host_id) return self._host_info(host_ref)", "self.connection.xenapi.SR.get_all_records().values(): if sr[\"PBDs\"] is not None and len(sr[\"PBDs\"]) > 0: storages.append({'id': sr[\"uuid\"]}) return", "return ret def network_info(self, net_ref): return {\"name_label\": self.connection.xenapi.network.get_name_label(net_ref), \"bridge\": self.connection.xenapi.network.get_bridge(net_ref), \"name_description\": self.connection.xenapi.network.get_name_description(net_ref), \"other_config\":", "guestdata: is_template = self.connection.xenapi.VM.get_is_a_template(vm_ref) if guestdata[\"template\"] ^ is_template: self.connection.xenapi.VM.set_is_a_template( vm_ref, guestdata[\"template\"] ) if", "= self.connection.xenapi.VIF.get_record(vif_ref) new_attributes = {} if \"network\" in data: net_refs = self._network_ref(data[\"network\"]) if", "disks_size + int(disk[\"virtual_size\"]) self.connection.xenapi.VDI.resize(disk[\"ref\"], str(new_disk_size)) return self._vm_info(self._vm_ref(guest_id)) def guest_delete(self, guest_id): self._delete_vm(guest_id) def guest_import(self,", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#", ") conn = httplib.HTTPConnection(master) conn.request(\"GET\", path) response = conn.getresponse() response_size = response.getheader(\"Content-Length\") return", "if guestdata[\"template\"] ^ is_template: self.connection.xenapi.VM.set_is_a_template( vm_ref, guestdata[\"template\"] ) if \"paravirtualized\" in guestdata: if", ") self.connection.xenapi.VM.set_HVM_boot_policy( vm_ref, \"BIOS order\" ) if \"hdd\" in guestdata: disk = self.get_disks(vm_ref)[-1]", "max_free_space: max_free_space = free_space storage_ref = sr_ref if vm_size and vm_size > 0", "vif_ref = self._network_interface_ref(vm_ref, network_interface_id) return self._network_interface_info(vif_ref) def network_interface_update(self, guest_id, network_interface_id, data): vm_ref =", "self.connection.xenapi.VM.set_VCPUs_params(vm_ref, parameters) if \"ha_enabled\" in guestdata: if guestdata[\"ha_enabled\"]: self.connection.xenapi.VM.set_ha_restart_priority( vm_ref, \"best-effort\" ) else:", "self.connection.xenapi.VM_guest_metrics.\\ get_record(vm[\"guest_metrics\"]) tools_up_to_date = guest_metrics[\"PV_drivers_up_to_date\"] if \"0/ip\" in guest_metrics[\"networks\"].keys(): ip = guest_metrics[\"networks\"][\"0/ip\"] host", "= self._cd_ref(vm_ref) null_ref = 'OpaqueRef:NULL' if self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] != null_ref: self.connection.xenapi.VBD.eject(cd_ref) def media_info(self, guest_id):", "from simplestack.presenters.formatter import Formatter import re import errno import socket import httplib import", "task_ref = self.connection.xenapi.task.create( \"export vm %s\" % guest_id, \"export job\" ) path =", "vm_ref = self._vm_ref(vm_id) if not vm_ref: return for snap_ref in self.connection.xenapi.VM.get_snapshots(vm_ref): snap =", "socket import httplib import logging LOG = logging.getLogger('simplestack.hypervisors.xen') class Stack(SimpleStack): state_translation = {", "# kbps in xen is actually kBps rate = data[\"ratelimit\"] / (8 *", "self._snapshot_info(snap) def 
snapshot_info(self, guest_id, snapshot_id): snap = self._vm_ref(snapshot_id) return self._snapshot_info(snap) def snapshot_revert(self, guest_id,", "used_memory / (1024 * 1024), total_memory / (1024 * 1024), pool_rec[\"uuid\"], master_rec[\"address\"], {", "def get_disks(self, vm_ref): disks = [] vm = self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref in vm['VBDs']:", "self._vm_ref(snapshot_id) return self._snapshot_info(snap) def snapshot_revert(self, guest_id, snapshot_id): self.connection.xenapi.VM.revert(self._vm_ref(snapshot_id)) def snapshot_delete(self, guest_id, snapshot_id): self._delete_vm(snapshot_id)", "{\"name\": None} else: name = self.connection.xenapi.VDI.get_record(iso_ref)[\"name_label\"] return {\"name\": name} def network_list(self): net_refs =", "}) if data.get(\"storage_id\"): raise FeatureNotImplemented() else: disks = self.get_disks(vm_ref) vdi_rec[\"SR\"] = disks[0][\"SR\"] if", "self.get_disks(vm_ref): size += int(vdi[\"virtual_size\"]) return size def _disk_rec(self, vm_ref, disk_id): disk_id = str(disk_id)", "self.get_disks_size(vm_ref) hdd = guestdata.get(\"hdd\") * 1024 * 1024 * 1024 new_disk_size = hdd", "= self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref in vm['VBDs']: vbd = self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"] == \"Disk\":", "= self.connection.xenapi.SR.get_all_records() max_free_space = 0 for sr_ref, record in storages.iteritems(): free_space = (", "self.connection.xenapi.SR.get_all_records() max_free_space = 0 for sr_ref, record in storages.iteritems(): free_space = ( int(record[\"physical_size\"])", "def logout(self): self.connection.xenapi.session.logout() def pool_info(self): used_memory = 0 for vm_rec in self.connection.xenapi.VM.get_all_records().values(): if", "self.poolinfo.get(\"api_server\") ) try: self.connection.xenapi.login_with_password( self.poolinfo.get(\"username\"), self.poolinfo.get(\"password\") ) except Exception, error: # If host", "snap_ref in 
self.connection.xenapi.VM.get_snapshots(vm_ref): snap = self.connection.xenapi.VM.get_record(snap_ref) self._delete_vm(snap[\"uuid\"]) self._delete_disks(vm_ref) self.connection.xenapi.VM.destroy(vm_ref) def _cd_ref(self, vm_ref): vm", "Version 2.0 (the \"License\"); # you may not use this file except in", "self.connection.xenapi.VM.set_ha_restart_priority( vm_ref, \"best-effort\" ) else: self.connection.xenapi.VM.set_ha_restart_priority(vm_ref, \"\") if \"template\" in guestdata: is_template =", "\"Simplestack generated disk\", \"virtual_size\": str(data[\"size\"] * 1024 * 1024 * 1024), \"type\": \"system\",", "\"vcpus_number_live\" in guestdata: self.connection.xenapi.VM.set_VCPUs_number_live(vm_ref, str(guestdata[\"vcpus_number_live\"])) if \"vcpu_settings\" in guestdata: parameters = self.connection.xenapi.VM.get_VCPUs_params(vm_ref) parameters.update(guestdata[\"vcpu_settings\"])", "media_mount(self, guest_id, media_data): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) if media_data.get(\"name\") and media_data[\"name\"]", "(disk_id) raise EntityNotFound(\"Disk\", entity_info) def _network_interface_ref(self, vm_ref, network_interface_id): vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref) for vif_ref", "sr = self.connection.xenapi.SR.get_record(sr_ref) return( self.format_for.storage( sr['uuid'], sr['name_label'], sr['type'], int(sr['physical_utilisation']) / (1024 * 1024", "disk_rec = self._disk_rec(vm_ref, disk_id) return self._disk_info(disk_rec) def disk_update(self, guest_id, disk_id, data): vm_ref =", "== disk_id: return disk entity_info = \"%s - on Guest\" % (disk_id) raise", "else: return self.connection.xenapi.VM.clean_shutdown( self._vm_ref(guest_id) ) def guest_start(self, guest_id): return self.connection.xenapi.VM.start( self._vm_ref(guest_id), False, False", "1024 * 1024 self.connection.xenapi.VDI.resize(disk_rec[\"ref\"], str(new_disk_size)) disk_rec = self._disk_rec(vm_ref, disk_id) return self._disk_info(disk_rec) def 
media_mount(self,", "\"type\": \"system\", \"sharable\": False, \"read_only\": False, \"other_config\": {}, \"xenstore_data\": {}, \"sm_config\": {}, \"tags\":", "= self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) iso_ref = self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] if iso_ref == 'OpaqueRef:NULL': return", "sr['name_label'], sr['type'], int(sr['physical_utilisation']) / (1024 * 1024 * 1024), int(sr['virtual_allocation']) / (1024 *", "sr in self.connection.xenapi.SR.get_all_records().values(): if sr[\"PBDs\"] is not None and len(sr[\"PBDs\"]) > 0: storages.append({'id':", "entity_info = \"%s - on Guest\" % (network_interface_id) raise EntityNotFound(\"NetworkInterface\", entity_info) def _vm_ref(self,", "slave, connect to master if 'HOST_IS_SLAVE' in str(error): self.poolinfo[\"api_server\"] = str(error).split(\"'\")[3] self.connect() else:", "Locaweb. # @author: <NAME> (morellon), Locaweb. # @author: <NAME> (PotHix), Locaweb. from simplestack.utils", "vm_ref, {\"order\": \"dc\"} ) self.connection.xenapi.VM.set_HVM_boot_policy( vm_ref, \"BIOS order\" ) if \"hdd\" in guestdata:", "return size def _disk_rec(self, vm_ref, disk_id): disk_id = str(disk_id) for disk in self.get_disks(vm_ref):", "> max_free_space: max_free_space = free_space storage_ref = sr_ref if vm_size and vm_size >", "True, \"MAC\": \"\", \"MTU\": \"0\", \"other_config\": {}, \"qos_algorithm_type\": \"\", \"qos_algorithm_params\": {} } if", "master = self.poolinfo.get(\"api_server\") task_ref = self.connection.xenapi.task.create( \"export vm %s\" % guest_id, \"export job\"", "self.connection.xenapi.SR.get_by_uuid(storage_id) return self._storage_info(sr_ref) def guest_list(self): guests = [] for vm in self.connection.xenapi.VM.get_all_records().values(): if", "self.connection.xenapi.VM.get_all_records().values(): if (not vm.get('is_a_snapshot')) and (not vm.get('is_a_template')): guests.append({'id': vm.get('uuid')}) return guests def guest_info(self,", "True: pv_args = \"-- quiet 
console=hvc0\" else: pv_args = guestdata[\"paravirtualized\"] self.connection.xenapi.VM.set_HVM_boot_policy(vm_ref, \"\") self.connection.xenapi.VM.set_PV_args(vm_ref,", "data[\"ipv4_allowed\"] if \"ipv6_allowed\" in data and vif_record[\"ipv6_allowed\"] != data[\"ipv6_allowed\"]: new_attributes[\"ipv6_allowed\"] = data[\"ipv6_allowed\"] if", "1024), int(sr['virtual_allocation']) / (1024 * 1024 * 1024), int(sr['physical_size']) / (1024 * 1024", ": memory , \"memory_static_min\" : memory, \"memory_static_max\" : memory } memory_target = str(int(memory[\"memory_target\"])<<20)", "is True: pv_args = \"-- quiet console=hvc0\" else: pv_args = guestdata[\"paravirtualized\"] self.connection.xenapi.VM.set_HVM_boot_policy(vm_ref, \"\")", "self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) null_ref = 'OpaqueRef:NULL' if self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] != null_ref: self.connection.xenapi.VBD.eject(cd_ref) def", "1024 new_disk_size = hdd - disks_size + int(disk[\"virtual_size\"]) self.connection.xenapi.VDI.resize(disk[\"ref\"], str(new_disk_size)) return self._vm_info(self._vm_ref(guest_id)) def", "EntityNotFound from simplestack.hypervisors.base import SimpleStack from simplestack.presenters.formatter import Formatter import re import errno", "network key: {\"network\": \"THE NETWORK NAME\"} \"\"\" vm_ref = self._vm_ref(guest_id) devices = []", "else: self.connection.xenapi.VM.set_ha_restart_priority(vm_ref, \"\") if \"template\" in guestdata: is_template = self.connection.xenapi.VM.get_is_a_template(vm_ref) if guestdata[\"template\"] ^", "self.connection.xenapi.VM.get_VIFs(vm_ref) for vif_ref in vif_refs: vif_rec = self.connection.xenapi.VIF.get_record(vif_ref) if vif_rec[\"MAC\"] == network_interface_id: return", "master_rec[\"address\"], { 'version': master_rec.get('software_version', {}).get('product_version') } ) ) def host_list(self): hosts = []", "return disk entity_info = \"%s - on Guest\" % (disk_id) raise EntityNotFound(\"Disk\", 
entity_info)", "= httplib.HTTPConnection(master) conn.request(\"GET\", path) response = conn.getresponse() response_size = response.getheader(\"Content-Length\") return (response, response_size)", "try: self.connection.xenapi.VIF.plug(vif_ref) except: pass else: try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass if \"ratelimit\" in data:", "= self._network_ref(data[\"network\"]) vif_ref = self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref) except: pass return self._network_interface_info(vif_ref) def network_interface_info(self,", "'OpaqueRef:NULL' if self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] != null_ref: self.connection.xenapi.VBD.eject(cd_ref) def media_info(self, guest_id): vm_ref = self._vm_ref(guest_id) cd_ref", "= { \"memory_target\" : memory , \"memory_static_min\" : memory, \"memory_static_max\" : memory }", "else: self.connection.xenapi.VM.set_PV_args(vm_ref, \"\") self.connection.xenapi.VM.set_HVM_boot_params( vm_ref, {\"order\": \"dc\"} ) self.connection.xenapi.VM.set_HVM_boot_policy( vm_ref, \"BIOS order\" )", "not isinstance(vcpus,dict): vcpus = { \"vcpus_at_startup\" : vcpus, \"vcpus_max\" : self.connection.xenapi.VM.get_VCPUs_max(vm_ref) } vcpus_at_startup", "1024), disk_rec.get(\"uuid\") ) ) def _snapshot_info(self, snapshot_ref): snapshot = self.connection.xenapi.VM.get_record(snapshot_ref) return( self.format_for.snapshot( snapshot.get('uuid'),", "for vif in self.connection.xenapi.VM.get_VIFs(vm_ref): devices.append(int(self.connection.xenapi.VIF.get_device(vif))) next_device = max(devices) + 1 for device in", ") if \"paravirtualized\" in guestdata: if guestdata[\"paravirtualized\"]: if guestdata[\"paravirtualized\"] is True: pv_args =", "1024), total_memory / (1024 * 1024), pool_rec[\"uuid\"], master_rec[\"address\"], { 'version': master_rec.get('software_version', {}).get('product_version') }", "disks = self.get_disks(vm_ref) vdi_rec[\"SR\"] = disks[0][\"SR\"] if \"name\" in data: 
vdi_rec[\"name_label\"] = data[\"name\"]", "return self.connection.xenapi.VM.resume( self._vm_ref(guest_id), False, False ) def guest_clone(self, guest_id, data): vm = self.connection.xenapi.VM.clone(", "if \"template\" in guestdata: is_template = self.connection.xenapi.VM.get_is_a_template(vm_ref) if guestdata[\"template\"] ^ is_template: self.connection.xenapi.VM.set_is_a_template( vm_ref,", "raise FeatureNotImplemented() else: disks = self.get_disks(vm_ref) vdi_rec[\"SR\"] = disks[0][\"SR\"] if \"name\" in data:", "for sr_ref, record in storages.iteritems(): free_space = ( int(record[\"physical_size\"]) - int(record[\"virtual_allocation\"]) ) if", "self.connection.xenapi.network.get_bridge(net_ref), \"name_description\": self.connection.xenapi.network.get_name_description(net_ref), \"other_config\": self.connection.xenapi.network.get_other_config(net_ref)} def _network_ref(self, name): net_ref = self.connection.xenapi.network.get_by_name_label(name) if len(net_ref)", "guest_id, disk_id, data): vm_ref = self._vm_ref(guest_id) disk_rec = self._disk_rec(vm_ref, disk_id) if \"name\" in", "in data and vif_record[\"ipv6_allowed\"] != data[\"ipv6_allowed\"]: new_attributes[\"ipv6_allowed\"] = data[\"ipv6_allowed\"] if len(new_attributes) != 0:", "str(vcpus[\"vcpus_max\"]) if int(vcpus_at_startup) > int(vcpus_max): self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_at_startup) else: self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_max) self.connection.xenapi.VM.set_VCPUs_at_startup(vm_ref, vcpus_at_startup) if", "= self.connection.xenapi.VM.get_is_a_template(vm_ref) if guestdata[\"template\"] ^ is_template: self.connection.xenapi.VM.set_is_a_template( vm_ref, guestdata[\"template\"] ) if \"paravirtualized\" in", "(1024 * 1024 * 1024), int(sr['virtual_allocation']) / (1024 * 1024 * 1024), int(sr['physical_size'])", "return hosts def host_info(self, host_id): host_ref = self.connection.xenapi.host.get_by_uuid(host_id) return self._host_info(host_ref) def 
storage_list(self): storages", "= logging.getLogger('simplestack.hypervisors.xen') class Stack(SimpleStack): state_translation = { \"Running\": \"STARTED\", \"Halted\": \"STOPPED\", \"Suspended\": \"PAUSED\"", "vbd[\"type\"] == \"Disk\": vdi = self.connection.xenapi.VDI.get_record(vbd['VDI']) vdi['userdevice'] = vbd['userdevice'] vdi['ref'] = vbd['VDI'] disks.append(vdi)", ") try: conn = httplib.HTTPConnection(master) conn.request( \"PUT\", path, vm_stream, {\"Content-Length\": vm_size} ) response", "self.connection.xenapi.VIF.unplug(vif_ref) except: pass self.connection.xenapi.VIF.destroy(vif_ref) def snapshot_list(self, guest_id): snaps = [ self._snapshot_info(s) for s", "= self._vm_ref(guest_id) vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref) return [self._network_interface_info(n) for n in vif_refs] def network_interface_create(self,", "memory_target ) if \"cpus\" in guestdata: vcpus = guestdata[\"cpus\"] if not isinstance(vcpus,dict): vcpus", "sr[\"PBDs\"] is not None and len(sr[\"PBDs\"]) > 0: storages.append({'id': sr[\"uuid\"]}) return storages def", "False ) def guest_reboot(self, guest_id, force=False): vm_ref = self._vm_ref(guest_id) if force: return self.connection.xenapi.VM.hard_reboot(vm_ref)", "self.format_for.snapshot( snapshot.get('uuid'), snapshot.get('name_label') ) ) def _network_interface_info(self, vif_ref): vif_rec = {'locking_mode': None, 'ipv4_allowed':", "\"ha_enabled\" in guestdata: if guestdata[\"ha_enabled\"]: self.connection.xenapi.VM.set_ha_restart_priority( vm_ref, \"best-effort\" ) else: self.connection.xenapi.VM.set_ha_restart_priority(vm_ref, \"\") if", "disk in self.get_disks(vm_ref): if disk[\"userdevice\"] == disk_id: return disk entity_info = \"%s -", "1024 * 1024 * 1024), \"type\": \"system\", \"sharable\": False, \"read_only\": False, \"other_config\": {},", "\"size\" in data: new_disk_size = int(data[\"size\"]) new_disk_size *= 1024 * 1024 * 1024", "_network_create(self, name, description, other_config={}): return 
self.connection.xenapi.network.create({\"name_label\": name, \"name_description\": description, \"other_config\": other_config}) def network_vlan_create(self,", "kbps in xen is actually kBps rate = data[\"ratelimit\"] / (8 * 1024)", "XenAPI from simplestack.exceptions import FeatureNotImplemented, EntityNotFound from simplestack.hypervisors.base import SimpleStack from simplestack.presenters.formatter import", "host = self.connection.xenapi.host.get_name_label( vm[\"resident_on\"] ) return( self.format_for.guest( vm.get('uuid'), vm.get('name_label'), int(vm.get('VCPUs_at_startup')), int(vm.get('memory_static_max')) / (1024", "vbd['VDI'] disks.append(vdi) return sorted(disks, key=lambda vdi: int(vdi['userdevice'])) def get_disks_size(self, vm_ref): size = 0", "import re import errno import socket import httplib import logging LOG = logging.getLogger('simplestack.hypervisors.xen')", ") ) def _network_interface_info(self, vif_ref): vif_rec = {'locking_mode': None, 'ipv4_allowed': None, 'ipv6_allowed': None}", "in guestdata: memory_target = str(int(guestdata[\"memory_target_live\"])<<20) self.connection.xenapi.VM.set_memory_dynamic_range( vm_ref, memory_target, memory_target ) if \"cpus\" in", "{}, \"sm_config\": {}, \"tags\": [] }) if data.get(\"storage_id\"): raise FeatureNotImplemented() else: disks =", "net_ref = self._network_create(name, description, other_config) pif_ref = self._network_get_pifs(from_network) ref = self.connection.xenapi.pool.create_VLAN_from_PIF(pif_ref[0], net_ref, str(vlan))", "specific language governing permissions and # limitations under the License. 
# # @author:", "> max_free_space: raise Exception(\"No storage space left for importing\") task_ref = self.connection.xenapi.task.create( \"import", "vm.get('is_a_template')): guests.append({'id': vm.get('uuid')}) return guests def guest_info(self, guest_id): vm = self._vm_ref(guest_id) return self._vm_info(vm)", "self._vm_ref(guest_id) ) def guest_start(self, guest_id): return self.connection.xenapi.VM.start( self._vm_ref(guest_id), False, False ) def guest_reboot(self,", "\"VM\": vm_ref, \"userdevice\": str(next_device), \"bootable\": False, \"mode\": \"RW\", \"type\": \"Disk\", \"unpluggable\": False, \"empty\":", "self.poolinfo.get(\"api_server\") storage_ref = None if storage_id: storage_ref = self.connection.xenapi.SR.get_by_uuid(storage_id) else: storages = self.connection.xenapi.SR.get_all_records()", "* 1024) self.connection.xenapi.VIF.set_qos_algorithm_type( vif_ref, \"ratelimit\" ) self.connection.xenapi.VIF.set_qos_algorithm_params( vif_ref, {\"kbps\": str(rate)} ) else: self.connection.xenapi.VIF.set_qos_algorithm_type(vif_ref,", "{}, \"qos_algorithm_type\": \"\", \"qos_algorithm_params\": {} } vdi_rec = ({ \"name_label\": \"New Disk\", \"name_description\":", "\"RW\", \"type\": \"Disk\", \"unpluggable\": False, \"empty\": False, \"other_config\": {}, \"qos_algorithm_type\": \"\", \"qos_algorithm_params\": {}", "/ (1024 * 1024 * 1024), disk_rec.get(\"uuid\") ) ) def _snapshot_info(self, snapshot_ref): snapshot", "OF ANY KIND, either express or implied. 
# See the License for the", "* 1024), self.get_disks_size(vm_ref) / (1024 * 1024 * 1024), vm[\"PV_args\"], tools_up_to_date, ip, self.state_translation[vm.get('power_state')],", "from_network, vlan, other_config={}): net_ref = self._network_create(name, description, other_config) pif_ref = self._network_get_pifs(from_network) ref =", "ret def network_info(self, net_ref): return {\"name_label\": self.connection.xenapi.network.get_name_label(net_ref), \"bridge\": self.connection.xenapi.network.get_bridge(net_ref), \"name_description\": self.connection.xenapi.network.get_name_description(net_ref), \"other_config\": self.connection.xenapi.network.get_other_config(net_ref)}", "self.format_for.guest( vm.get('uuid'), vm.get('name_label'), int(vm.get('VCPUs_at_startup')), int(vm.get('memory_static_max')) / (1024 * 1024), self.get_disks_size(vm_ref) / (1024 *", "from simplestack.exceptions import FeatureNotImplemented, EntityNotFound from simplestack.hypervisors.base import SimpleStack from simplestack.presenters.formatter import Formatter", "\"qos_algorithm_type\": \"\", \"qos_algorithm_params\": {} } if \"network\" in data: vif_record[\"network\"] = self._network_ref(data[\"network\"]) vif_ref", ") def guest_start(self, guest_id): return self.connection.xenapi.VM.start( self._vm_ref(guest_id), False, False ) def guest_reboot(self, guest_id,", "= self._network_interface_ref(vm_ref, network_interface_id) vif_record = self.connection.xenapi.VIF.get_record(vif_ref) new_attributes = {} if \"network\" in data:", "self._network_interface_ref(vm_ref, network_interface_id) vif_record = self.connection.xenapi.VIF.get_record(vif_ref) new_attributes = {} if \"network\" in data: net_refs", "vm[\"resident_on\"] != \"OpaqueRef:NULL\": host = self.connection.xenapi.host.get_name_label( vm[\"resident_on\"] ) return( self.format_for.guest( vm.get('uuid'), vm.get('name_label'), int(vm.get('VCPUs_at_startup')),", "vif_refs: vif_rec = self.connection.xenapi.VIF.get_record(vif_ref) if 
vif_rec[\"MAC\"] == network_interface_id: return vif_ref entity_info = \"%s", "socket.error as err: if err.errno == errno.ECONNRESET: LOG.warning(\"error=CONNRESET action=import message='BUG?'\") else: raise task_rec", "poolinfo self.format_for = Formatter() self.connect() def connect(self): self.connection = XenAPI.Session( \"https://%s/\" % self.poolinfo.get(\"api_server\")", "1024), pool_rec[\"uuid\"], master_rec[\"address\"], { 'version': master_rec.get('software_version', {}).get('product_version') } ) ) def host_list(self): hosts", "guestdata[\"ha_enabled\"]: self.connection.xenapi.VM.set_ha_restart_priority( vm_ref, \"best-effort\" ) else: self.connection.xenapi.VM.set_ha_restart_priority(vm_ref, \"\") if \"template\" in guestdata: is_template", "name} def network_list(self): net_refs = self.connection.xenapi.network.get_all() ret = [] for net in net_refs:", "disk_update(self, guest_id, disk_id, data): vm_ref = self._vm_ref(guest_id) disk_rec = self._disk_rec(vm_ref, disk_id) if \"name\"", "= None if storage_id: storage_ref = self.connection.xenapi.SR.get_by_uuid(storage_id) else: storages = self.connection.xenapi.SR.get_all_records() max_free_space =", "vcpus = { \"vcpus_at_startup\" : vcpus, \"vcpus_max\" : self.connection.xenapi.VM.get_VCPUs_max(vm_ref) } vcpus_at_startup = str(vcpus[\"vcpus_at_startup\"])", "should contain at least a network key: {\"network\": \"THE NETWORK NAME\"} \"\"\" vm_ref", "network_interface_delete(self, guest_id, network_interface_id): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) try: self.connection.xenapi.VIF.unplug(vif_ref) except:", "vm = self.connection.xenapi.VM.get_record(vm_ref) tools_up_to_date = None ip = None if vm[\"guest_metrics\"] != \"OpaqueRef:NULL\":", "vm_ref = self._vm_ref(guest_id) disks = self.get_disks(vm_ref) return [self._disk_info(d) for d in disks] def", "\"\"\" Data should contain at least a network key: {\"network\": \"THE NETWORK NAME\"}", 
"task_ref = self.connection.xenapi.task.create( \"import vm\", \"import job\" ) path = \"/import?session_id=%s&task_id=%s&sr_id=%s\" % (", "= conn.getresponse() response_size = response.getheader(\"Content-Length\") return (response, response_size) def disk_list(self, guest_id): vm_ref =", "not in devices: next_device = device break vbd_rec = { \"VM\": vm_ref, \"userdevice\":", "self.connection.xenapi.VIF.get_record(vif_ref) new_attributes = {} if \"network\" in data: net_refs = self._network_ref(data[\"network\"]) if vif_record[\"network\"]", "vif_rec[\"MAC\"], vif_rec[\"device\"], vif_rec[\"MAC\"], network_rec[\"name_label\"], vif_rec[\"locking_mode\"], vif_rec[\"ipv4_allowed\"], vif_rec[\"ipv6_allowed\"], vif_rec[\"qos_algorithm_params\"] ) ) def _delete_vm(self, vm_id):", "vm_ref): vm = self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref in vm['VBDs']: vbd = self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"]", "{ 'version': master_rec.get('software_version', {}).get('product_version') } ) ) def host_list(self): hosts = [] for", "network_interface_id: return vif_ref entity_info = \"%s - on Guest\" % (network_interface_id) raise EntityNotFound(\"NetworkInterface\",", "[] for vm in self.connection.xenapi.VM.get_all_records().values(): if (not vm.get('is_a_snapshot')) and (not vm.get('is_a_template')): guests.append({'id': vm.get('uuid')})", "( session_ref, task_ref, vm_ref ) conn = httplib.HTTPConnection(master) conn.request(\"GET\", path) response = conn.getresponse()", "self._delete_vm(snap[\"uuid\"]) self._delete_disks(vm_ref) self.connection.xenapi.VM.destroy(vm_ref) def _cd_ref(self, vm_ref): vm = self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref in vm['VBDs']:", "_snapshot_info(self, snapshot_ref): snapshot = self.connection.xenapi.VM.get_record(snapshot_ref) return( self.format_for.snapshot( snapshot.get('uuid'), snapshot.get('name_label') ) ) def _network_interface_info(self,", "self.connection.xenapi.VDI.set_name_label( 
disk_rec[\"ref\"], data[\"name\"] ) self.connection.xenapi.VDI.set_name_description( disk_rec[\"ref\"], data[\"name\"] ) if \"size\" in data: new_disk_size", "1024 self.connection.xenapi.VDI.resize(disk_rec[\"ref\"], str(new_disk_size)) disk_rec = self._disk_rec(vm_ref, disk_id) return self._disk_info(disk_rec) def media_mount(self, guest_id, media_data):", "self._cd_ref(vm_ref) iso_ref = self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] if iso_ref == 'OpaqueRef:NULL': return {\"name\": None} else: name", "size def _disk_rec(self, vm_ref, disk_id): disk_id = str(disk_id) for disk in self.get_disks(vm_ref): if", "% uuid) return None def _host_info(self, host_ref): host = self.connection.xenapi.host.get_record(host_ref) return( self.format_for.host( host['uuid'],", "= self.connection.xenapi.VDI.get_record(vbd['VDI']) vdi['userdevice'] = vbd['userdevice'] vdi['ref'] = vbd['VDI'] disks.append(vdi) return sorted(disks, key=lambda vdi:", "= 0 for sr_ref, record in storages.iteritems(): free_space = ( int(record[\"physical_size\"]) - int(record[\"virtual_allocation\"])", "in disks] def disk_create(self, guest_id, data): vm_ref = self._vm_ref(guest_id) devices = [] for", "= self._vm_ref(guest_id) disk_rec = self._disk_rec(vm_ref, disk_id) return self._disk_info(disk_rec) def disk_update(self, guest_id, disk_id, data):", "self.connection.xenapi.VDI.get_record(vbd['VDI']) vdi['userdevice'] = vbd['userdevice'] vdi['ref'] = vbd['VDI'] disks.append(vdi) return sorted(disks, key=lambda vdi: int(vdi['userdevice']))", "disks = [] vm = self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref in vm['VBDs']: vbd = self.connection.xenapi.VBD.get_record(vbd_ref)", "or agreed to in writing, software # distributed under the License is distributed", "return self.connection.xenapi.VM.suspend(self._vm_ref(guest_id)) def guest_resume(self, guest_id): return self.connection.xenapi.VM.resume( self._vm_ref(guest_id), False, False ) def guest_clone(self,", "net_ref def 
network_interface_list(self, guest_id): vm_ref = self._vm_ref(guest_id) vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref) return [self._network_interface_info(n) for", "data: net_refs = self._network_ref(data[\"network\"]) if vif_record[\"network\"] != net_refs: new_attributes[\"network\"] = net_refs if \"locking_mode\"", "guest_id, guestdata): vm_ref = self._vm_ref(guest_id) if \"name\" in guestdata: self.connection.xenapi.VM.set_name_label(vm_ref, guestdata[\"name\"]) if \"memory\"", "= self._vm_ref(guest_id) session_ref = self.connection._session # FIXME: get real master master = self.poolinfo.get(\"api_server\")", "vm = self.connection.xenapi.VM.clone( self._vm_ref(guest_id), data[\"name\"] ) return self._vm_info(vm) def guest_update(self, guest_id, guestdata): vm_ref", "sr_ref = self.connection.xenapi.SR.get_by_uuid(storage_id) return self._storage_info(sr_ref) def guest_list(self): guests = [] for vm in", "in self.connection.xenapi.VM.get_all_records().values(): if not vm_rec['is_a_template'] and not vm_rec['is_a_snapshot']: used_memory += int(vm_rec['memory_dynamic_max']) total_memory =", "data.get(\"storage_id\"): raise FeatureNotImplemented() else: disks = self.get_disks(vm_ref) vdi_rec[\"SR\"] = disks[0][\"SR\"] if \"name\" in", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "ip = guest_metrics[\"networks\"][\"0/ip\"] host = None if vm[\"resident_on\"] != \"OpaqueRef:NULL\": host = self.connection.xenapi.host.get_name_label(", "self.connection.xenapi.VM.set_PV_args(vm_ref, pv_args) else: self.connection.xenapi.VM.set_PV_args(vm_ref, \"\") self.connection.xenapi.VM.set_HVM_boot_params( vm_ref, {\"order\": \"dc\"} ) self.connection.xenapi.VM.set_HVM_boot_policy( vm_ref, \"BIOS", "import FeatureNotImplemented, EntityNotFound from simplestack.hypervisors.base import SimpleStack from simplestack.presenters.formatter import Formatter import re", "+= int(vm_rec['memory_dynamic_max']) total_memory = 0 for host_ref in 
self.connection.xenapi.host.get_all(): met_ref = self.connection.xenapi.host.get_metrics(host_ref) m_rec", "key: {\"network\": \"THE NETWORK NAME\"} \"\"\" vm_ref = self._vm_ref(guest_id) devices = [] for", "License. # You may obtain a copy of the License at # #", "1024 * 1024 new_disk_size = hdd - disks_size + int(disk[\"virtual_size\"]) self.connection.xenapi.VDI.resize(disk[\"ref\"], str(new_disk_size)) return", "\"Suspended\": \"PAUSED\" } def __init__(self, poolinfo): self.connection = False self.poolinfo = poolinfo self.format_for", "logging LOG = logging.getLogger('simplestack.hypervisors.xen') class Stack(SimpleStack): state_translation = { \"Running\": \"STARTED\", \"Halted\": \"STOPPED\",", "return( self.format_for.storage( sr['uuid'], sr['name_label'], sr['type'], int(sr['physical_utilisation']) / (1024 * 1024 * 1024), int(sr['virtual_allocation'])", "d in disks] def disk_create(self, guest_id, data): vm_ref = self._vm_ref(guest_id) devices = []", "else: self.media_unmount(guest_id) def media_unmount(self, guest_id): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) null_ref =", "snaps def snapshot_create(self, guest_id, snapshot_name=None): if not snapshot_name: snapshot_name = str(datetime.datetime.now()) snap =", "[] for vif in self.connection.xenapi.VM.get_VIFs(vm_ref): devices.append(int(self.connection.xenapi.VIF.get_device(vif))) next_device = max(devices) + 1 for device", "vm_size > max_free_space: raise Exception(\"No storage space left for importing\") task_ref = self.connection.xenapi.task.create(", "httplib.HTTPConnection(master) conn.request(\"GET\", path) response = conn.getresponse() response_size = response.getheader(\"Content-Length\") return (response, response_size) def", "= device break vbd_rec = { \"VM\": vm_ref, \"userdevice\": str(next_device), \"bootable\": False, \"mode\":", "disks] def disk_create(self, guest_id, data): vm_ref = self._vm_ref(guest_id) devices = [] for vbd", "# limitations under the License. 
# # @author: <NAME>, Locaweb. # @author: <NAME>", "space left for importing\") task_ref = self.connection.xenapi.task.create( \"import vm\", \"import job\" ) path", "memory = { \"memory_target\" : memory , \"memory_static_min\" : memory, \"memory_static_max\" : memory", "return self._vm_info(self._vm_ref(guest_id)) def guest_delete(self, guest_id): self._delete_vm(guest_id) def guest_import(self, vm_stream, vm_size, storage_id=None): session_ref =", "network_interface_id): vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref) for vif_ref in vif_refs: vif_rec = self.connection.xenapi.VIF.get_record(vif_ref) if vif_rec[\"MAC\"]", "description, \"other_config\": other_config}) def network_vlan_create(self, name, description, from_network, vlan, other_config={}): net_ref = self._network_create(name,", "\"system\", \"sharable\": False, \"read_only\": False, \"other_config\": {}, \"xenstore_data\": {}, \"sm_config\": {}, \"tags\": []", "guest_clone(self, guest_id, data): vm = self.connection.xenapi.VM.clone( self._vm_ref(guest_id), data[\"name\"] ) return self._vm_info(vm) def guest_update(self,", ") if \"hdd\" in guestdata: disk = self.get_disks(vm_ref)[-1] disks_size = self.get_disks_size(vm_ref) hdd =", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "response.read() except socket.error as err: if err.errno == errno.ECONNRESET: LOG.warning(\"error=CONNRESET action=import message='BUG?'\") else:", "self.connection.xenapi.VDI.resize(disk[\"ref\"], str(new_disk_size)) return self._vm_info(self._vm_ref(guest_id)) def guest_delete(self, guest_id): self._delete_vm(guest_id) def guest_import(self, vm_stream, vm_size, storage_id=None):", ") return( self.format_for.guest( vm.get('uuid'), vm.get('name_label'), int(vm.get('VCPUs_at_startup')), int(vm.get('memory_static_max')) / (1024 * 1024), self.get_disks_size(vm_ref) /", "return self.connection.xenapi.network.create({\"name_label\": name, \"name_description\": description, 
\"other_config\": other_config}) def network_vlan_create(self, name, description, from_network, vlan,", ") if \"size\" in data: new_disk_size = int(data[\"size\"]) new_disk_size *= 1024 * 1024", "\"%s - on Guest\" % (network_interface_id) raise EntityNotFound(\"NetworkInterface\", entity_info) def _vm_ref(self, uuid): try:", "self.tag_list(guest_id) def tag_delete(self, guest_id, tag_name): vm_ref = self._vm_ref(guest_id) self.connection.xenapi.VM.remove_tags(vm_ref, tag_name) def get_disks(self, vm_ref):", "int(sr['physical_size']) / (1024 * 1024 * 1024) ) ) def _vm_info(self, vm_ref): vm", "sr[\"uuid\"]}) return storages def storage_info(self, storage_id): sr_ref = self.connection.xenapi.SR.get_by_uuid(storage_id) return self._storage_info(sr_ref) def guest_list(self):", "and len(sr[\"PBDs\"]) > 0: storages.append({'id': sr[\"uuid\"]}) return storages def storage_info(self, storage_id): sr_ref =", "= hdd - disks_size + int(disk[\"virtual_size\"]) self.connection.xenapi.VDI.resize(disk[\"ref\"], str(new_disk_size)) return self._vm_info(self._vm_ref(guest_id)) def guest_delete(self, guest_id):", "description, other_config) pif_ref = self._network_get_pifs(from_network) ref = self.connection.xenapi.pool.create_VLAN_from_PIF(pif_ref[0], net_ref, str(vlan)) return net_ref def", "None, 'ipv4_allowed': None, 'ipv6_allowed': None} vif_rec.update(self.connection.xenapi.VIF.get_record(vif_ref)) network_rec = self.connection.xenapi.network.get_record( vif_rec[\"network\"] ) return( self.format_for.network_interface(", "self.connection.xenapi.task.get_record(task_ref) vm_ref = re.sub(r'<.*?>', \"\", task_rec[\"result\"]) self.connection.xenapi.task.destroy(task_ref) return self._vm_info(vm_ref) def guest_export(self, guest_id): vm_ref", "_cd_ref(self, vm_ref): vm = self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref in vm['VBDs']: vbd = self.connection.xenapi.VBD.get_record(vbd_ref) if", "uuid) return None def _host_info(self, host_ref): host = 
self.connection.xenapi.host.get_record(host_ref) return( self.format_for.host( host['uuid'], host['name_label'],", "storage_id: storage_ref = self.connection.xenapi.SR.get_by_uuid(storage_id) else: storages = self.connection.xenapi.SR.get_all_records() max_free_space = 0 for sr_ref,", "} ) ) def host_list(self): hosts = [] for h in self.connection.xenapi.host.get_all_records().values(): hosts.append({'id':", "vif_ref, \"ratelimit\" ) self.connection.xenapi.VIF.set_qos_algorithm_params( vif_ref, {\"kbps\": str(rate)} ) else: self.connection.xenapi.VIF.set_qos_algorithm_type(vif_ref, \"\") return self._network_interface_info(vif_ref)", "\"mode\": \"RW\", \"type\": \"Disk\", \"unpluggable\": False, \"empty\": False, \"other_config\": {}, \"qos_algorithm_type\": \"\", \"qos_algorithm_params\":", "master_rec.get('software_version', {}).get('product_version') } ) ) def host_list(self): hosts = [] for h in", "self._disk_rec(vm_ref, disk_id) if \"name\" in data: self.connection.xenapi.VDI.set_name_label( disk_rec[\"ref\"], data[\"name\"] ) self.connection.xenapi.VDI.set_name_description( disk_rec[\"ref\"], data[\"name\"]", "License, Version 2.0 (the \"License\"); # you may not use this file except", "int(vdi[\"virtual_size\"]) return size def _disk_rec(self, vm_ref, disk_id): disk_id = str(disk_id) for disk in", "memory_target ) if \"memory_target_live\" in guestdata: memory_target = str(int(guestdata[\"memory_target_live\"])<<20) self.connection.xenapi.VM.set_memory_dynamic_range( vm_ref, memory_target, memory_target", "NETWORK NAME\"} \"\"\" vm_ref = self._vm_ref(guest_id) devices = [] for vif in self.connection.xenapi.VM.get_VIFs(vm_ref):", "guests def guest_info(self, guest_id): vm = self._vm_ref(guest_id) return self._vm_info(vm) def guest_shutdown(self, guest_id, force=False):", "conn.getresponse() response.status response.read() except socket.error as err: if err.errno == errno.ECONNRESET: LOG.warning(\"error=CONNRESET action=import", "\"Disk\", \"unpluggable\": 
False, \"empty\": False, \"other_config\": {}, \"qos_algorithm_type\": \"\", \"qos_algorithm_params\": {} } vdi_rec", "def media_mount(self, guest_id, media_data): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) if media_data.get(\"name\") and", "self._network_interface_ref(vm_ref, network_interface_id) return self._network_interface_info(vif_ref) def network_interface_update(self, guest_id, network_interface_id, data): vm_ref = self._vm_ref(guest_id) vif_ref", "FIXME: get real master master = self.poolinfo.get(\"api_server\") task_ref = self.connection.xenapi.task.create( \"export vm %s\"", "vm_ref = self._vm_ref(guest_id) self.connection.xenapi.VM.remove_tags(vm_ref, tag_name) def get_disks(self, vm_ref): disks = [] vm =", "network_interface_update(self, guest_id, network_interface_id, data): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) vif_record =", "sr['type'], int(sr['physical_utilisation']) / (1024 * 1024 * 1024), int(sr['virtual_allocation']) / (1024 * 1024", "if self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] != null_ref: self.connection.xenapi.VBD.eject(cd_ref) def media_info(self, guest_id): vm_ref = self._vm_ref(guest_id) cd_ref =", "self.connection.xenapi.host.get_record(pool_rec[\"master\"]) return ( self.format_for.pool( used_memory / (1024 * 1024), total_memory / (1024 *", "_network_ref(self, name): net_ref = self.connection.xenapi.network.get_by_name_label(name) if len(net_ref) == 0: raise EntityNotFound(\"NetworkInterface\", \"Unknown network:", "= guest_metrics[\"networks\"][\"0/ip\"] host = None if vm[\"resident_on\"] != \"OpaqueRef:NULL\": host = self.connection.xenapi.host.get_name_label( vm[\"resident_on\"]", "/ (1024 * 1024), pool_rec[\"uuid\"], master_rec[\"address\"], { 'version': master_rec.get('software_version', {}).get('product_version') } ) )", "\"virtual_size\": str(data[\"size\"] * 1024 * 1024 * 1024), \"type\": \"system\", \"sharable\": False, 
\"read_only\":", "\"sharable\": False, \"read_only\": False, \"other_config\": {}, \"xenstore_data\": {}, \"sm_config\": {}, \"tags\": [] })", "guestdata): vm_ref = self._vm_ref(guest_id) if \"name\" in guestdata: self.connection.xenapi.VM.set_name_label(vm_ref, guestdata[\"name\"]) if \"memory\" in", "guestdata.get(\"hdd\") * 1024 * 1024 * 1024 new_disk_size = hdd - disks_size +", "self.connection.xenapi.VBD.eject(cd_ref) def media_info(self, guest_id): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) iso_ref = self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"]", "return [self._network_interface_info(n) for n in vif_refs] def network_interface_create(self, guest_id, data): \"\"\" Data should", "media_data[\"name\"] )[0] self.connection.xenapi.VBD.insert(cd_ref, iso_ref) else: self.media_unmount(guest_id) def media_unmount(self, guest_id): vm_ref = self._vm_ref(guest_id) cd_ref", "_delete_vm(self, vm_id): vm_ref = self._vm_ref(vm_id) if not vm_ref: return for snap_ref in self.connection.xenapi.VM.get_snapshots(vm_ref):", "vm_ref = self._vm_ref(guest_id) if force: return self.connection.xenapi.VM.hard_reboot(vm_ref) else: return self.connection.xenapi.VM.clean_reboot(vm_ref) def guest_suspend(self, guest_id):", "= self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) vif_record = self.connection.xenapi.VIF.get_record(vif_ref) new_attributes = {} if", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "* 1024 * 1024), disk_rec.get(\"uuid\") ) ) def _snapshot_info(self, snapshot_ref): snapshot = self.connection.xenapi.VM.get_record(snapshot_ref)", "!= null_ref: self.connection.xenapi.VBD.eject(cd_ref) def media_info(self, guest_id): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) iso_ref", "vbd['userdevice'] vdi['ref'] = vbd['VDI'] disks.append(vdi) return sorted(disks, key=lambda vdi: int(vdi['userdevice'])) def get_disks_size(self, vm_ref):", "\"device\": 
str(next_device), \"MAC_autogenerated\": True, \"MAC\": \"\", \"MTU\": \"0\", \"other_config\": {}, \"qos_algorithm_type\": \"\", \"qos_algorithm_params\":", "vm_ref = self._vm_ref(guest_id) session_ref = self.connection._session # FIXME: get real master master =", "self.connection.xenapi.SR.get_record(sr_ref) return( self.format_for.storage( sr['uuid'], sr['name_label'], sr['type'], int(sr['physical_utilisation']) / (1024 * 1024 * 1024),", "self.connection.xenapi.VM.get_record(snap_ref) self._delete_vm(snap[\"uuid\"]) self._delete_disks(vm_ref) self.connection.xenapi.VM.destroy(vm_ref) def _cd_ref(self, vm_ref): vm = self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref in", "= free_space storage_ref = sr_ref if vm_size and vm_size > 0 and vm_size", "\"tags\": [] }) if data.get(\"storage_id\"): raise FeatureNotImplemented() else: disks = self.get_disks(vm_ref) vdi_rec[\"SR\"] =", "0 for sr_ref, record in storages.iteritems(): free_space = ( int(record[\"physical_size\"]) - int(record[\"virtual_allocation\"]) )", "\"dc\"} ) self.connection.xenapi.VM.set_HVM_boot_policy( vm_ref, \"BIOS order\" ) if \"hdd\" in guestdata: disk =", "storage_ref = None if storage_id: storage_ref = self.connection.xenapi.SR.get_by_uuid(storage_id) else: storages = self.connection.xenapi.SR.get_all_records() max_free_space", "host_ref = self.connection.xenapi.host.get_by_uuid(host_id) return self._host_info(host_ref) def storage_list(self): storages = [] for sr in", "vcpus, \"vcpus_max\" : self.connection.xenapi.VM.get_VCPUs_max(vm_ref) } vcpus_at_startup = str(vcpus[\"vcpus_at_startup\"]) vcpus_max = str(vcpus[\"vcpus_max\"]) if int(vcpus_at_startup)", "path = \"/export?session_id=%s&task_id=%s&ref=%s\" % ( session_ref, task_ref, vm_ref ) conn = httplib.HTTPConnection(master) conn.request(\"GET\",", "None if storage_id: storage_ref = self.connection.xenapi.SR.get_by_uuid(storage_id) else: storages = self.connection.xenapi.SR.get_all_records() max_free_space = 0", "vm_size > 0 and 
vm_size > max_free_space: raise Exception(\"No storage space left for", "self.connection.xenapi.VM.set_VCPUs_number_live(vm_ref, str(guestdata[\"vcpus_number_live\"])) if \"vcpu_settings\" in guestdata: parameters = self.connection.xenapi.VM.get_VCPUs_params(vm_ref) parameters.update(guestdata[\"vcpu_settings\"]) self.connection.xenapi.VM.set_VCPUs_params(vm_ref, parameters) if", "None ip = None if vm[\"guest_metrics\"] != \"OpaqueRef:NULL\": guest_metrics = self.connection.xenapi.VM_guest_metrics.\\ get_record(vm[\"guest_metrics\"]) tools_up_to_date", "generated disk\", \"virtual_size\": str(data[\"size\"] * 1024 * 1024 * 1024), \"type\": \"system\", \"sharable\":", "* 1024), disk_rec.get(\"uuid\") ) ) def _snapshot_info(self, snapshot_ref): snapshot = self.connection.xenapi.VM.get_record(snapshot_ref) return( self.format_for.snapshot(", "next_device = max(devices) + 1 for device in range(next_device): if device not in", "None} else: name = self.connection.xenapi.VDI.get_record(iso_ref)[\"name_label\"] return {\"name\": name} def network_list(self): net_refs = self.connection.xenapi.network.get_all()", "self.connection.xenapi.host.get_name_label( vm[\"resident_on\"] ) return( self.format_for.guest( vm.get('uuid'), vm.get('name_label'), int(vm.get('VCPUs_at_startup')), int(vm.get('memory_static_max')) / (1024 * 1024),", "= [] vm = self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref in vm['VBDs']: vbd = self.connection.xenapi.VBD.get_record(vbd_ref) if", "try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass if \"ratelimit\" in data: if data[\"ratelimit\"]: # kbps in", "pool_rec[\"uuid\"], master_rec[\"address\"], { 'version': master_rec.get('software_version', {}).get('product_version') } ) ) def host_list(self): hosts =", "host_info(self, host_id): host_ref = self.connection.xenapi.host.get_by_uuid(host_id) return self._host_info(host_ref) def storage_list(self): storages = [] for", "in str(error): self.poolinfo[\"api_server\"] = 
str(error).split(\"'\")[3] self.connect() else: raise error def logout(self): self.connection.xenapi.session.logout() def", "vm_ref ) conn = httplib.HTTPConnection(master) conn.request(\"GET\", path) response = conn.getresponse() response_size = response.getheader(\"Content-Length\")", "else: name = self.connection.xenapi.VDI.get_record(iso_ref)[\"name_label\"] return {\"name\": name} def network_list(self): net_refs = self.connection.xenapi.network.get_all() ret", "pool_info(self): used_memory = 0 for vm_rec in self.connection.xenapi.VM.get_all_records().values(): if not vm_rec['is_a_template'] and not", "_network_interface_ref(self, vm_ref, network_interface_id): vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref) for vif_ref in vif_refs: vif_rec = self.connection.xenapi.VIF.get_record(vif_ref)", "iso_ref) else: self.media_unmount(guest_id) def media_unmount(self, guest_id): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) null_ref", "data: vdi_rec[\"name_label\"] = data[\"name\"] vdi_rec[\"name_description\"] = data[\"name\"] vdi_ref = self.connection.xenapi.VDI.create(vdi_rec) vbd_rec[\"VDI\"] = vdi_ref", "LOG.warning(\"uuid=%s action=not_found\" % uuid) return None def _host_info(self, host_ref): host = self.connection.xenapi.host.get_record(host_ref) return(", "str(new_disk_size)) return self._vm_info(self._vm_ref(guest_id)) def guest_delete(self, guest_id): self._delete_vm(guest_id) def guest_import(self, vm_stream, vm_size, storage_id=None): session_ref", "or implied. # See the License for the specific language governing permissions and", "guest_id, tag_name): vm_ref = self._vm_ref(guest_id) self.connection.xenapi.VM.remove_tags(vm_ref, tag_name) def get_disks(self, vm_ref): disks = []", "@author: <NAME> (morellon), Locaweb. # @author: <NAME> (PotHix), Locaweb. 
from simplestack.utils import XenAPI", "vm_ref = self._vm_ref(guest_id) self.connection.xenapi.VM.add_tags(vm_ref, tag_name) return self.tag_list(guest_id) def tag_delete(self, guest_id, tag_name): vm_ref =", "_vm_ref(self, uuid): try: return self.connection.xenapi.VM.get_by_uuid(uuid) except: LOG.warning(\"uuid=%s action=not_found\" % uuid) return None def", "memory_static_min = str(int(memory[\"memory_static_min\"])<<20) memory_static_max = str(int(memory[\"memory_static_max\"])<<20) self.connection.xenapi.VM.set_memory_limits( vm_ref, memory_static_min, memory_static_max, memory_target, memory_target )", "vdi_ref = self.connection.xenapi.VDI.create(vdi_rec) vbd_rec[\"VDI\"] = vdi_ref self.connection.xenapi.VBD.create(vbd_rec) disk_rec = self._disk_rec(vm_ref, next_device) return self._disk_info(disk_rec)", "FeatureNotImplemented() else: disks = self.get_disks(vm_ref) vdi_rec[\"SR\"] = disks[0][\"SR\"] if \"name\" in data: vdi_rec[\"name_label\"]", "for snap_ref in self.connection.xenapi.VM.get_snapshots(vm_ref): snap = self.connection.xenapi.VM.get_record(snap_ref) self._delete_vm(snap[\"uuid\"]) self._delete_disks(vm_ref) self.connection.xenapi.VM.destroy(vm_ref) def _cd_ref(self, vm_ref):", "= guestdata.get(\"hdd\") * 1024 * 1024 * 1024 new_disk_size = hdd - disks_size", "def host_list(self): hosts = [] for h in self.connection.xenapi.host.get_all_records().values(): hosts.append({'id': h[\"uuid\"]}) return hosts", "left for importing\") task_ref = self.connection.xenapi.task.create( \"import vm\", \"import job\" ) path =", "self.connection.xenapi.VM.get_VIFs(vm_ref): devices.append(int(self.connection.xenapi.VIF.get_device(vif))) next_device = max(devices) + 1 for device in range(next_device): if device", "] return snaps def snapshot_create(self, guest_id, snapshot_name=None): if not snapshot_name: snapshot_name = str(datetime.datetime.now())", "Locaweb. # @author: <NAME> (PotHix), Locaweb. 
from simplestack.utils import XenAPI from simplestack.exceptions import", "* 1024 * 1024), int(sr['virtual_allocation']) / (1024 * 1024 * 1024), int(sr['physical_size']) /", "self.format_for = Formatter() self.connect() def connect(self): self.connection = XenAPI.Session( \"https://%s/\" % self.poolinfo.get(\"api_server\") )", "vm = self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref in vm['VBDs']: vbd = self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"] ==", "1024 * 1024 * 1024 new_disk_size = hdd - disks_size + int(disk[\"virtual_size\"]) self.connection.xenapi.VDI.resize(disk[\"ref\"],", "task_ref, vm_ref ) conn = httplib.HTTPConnection(master) conn.request(\"GET\", path) response = conn.getresponse() response_size =", "and vif_record[\"ipv6_allowed\"] != data[\"ipv6_allowed\"]: new_attributes[\"ipv6_allowed\"] = data[\"ipv6_allowed\"] if len(new_attributes) != 0: vif_record.update(new_attributes) try:", "network_interface_list(self, guest_id): vm_ref = self._vm_ref(guest_id) vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref) return [self._network_interface_info(n) for n in", "} memory_target = str(int(memory[\"memory_target\"])<<20) memory_static_min = str(int(memory[\"memory_static_min\"])<<20) memory_static_max = str(int(memory[\"memory_static_max\"])<<20) self.connection.xenapi.VM.set_memory_limits( vm_ref, memory_static_min,", "= self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass self.connection.xenapi.VIF.destroy(vif_ref) def snapshot_list(self,", "guestdata[\"paravirtualized\"]: if guestdata[\"paravirtualized\"] is True: pv_args = \"-- quiet console=hvc0\" else: pv_args =", "== 'OpaqueRef:NULL': return {\"name\": None} else: name = self.connection.xenapi.VDI.get_record(iso_ref)[\"name_label\"] return {\"name\": name} def", "(1024 * 1024 * 1024), disk_rec.get(\"uuid\") ) ) def _snapshot_info(self, snapshot_ref): snapshot =", "cd_ref = 
self._cd_ref(vm_ref) if media_data.get(\"name\") and media_data[\"name\"] != \"\": self.media_unmount(guest_id) iso_ref = self.connection.xenapi.VDI.get_by_name_label(", "snapshot_info(self, guest_id, snapshot_id): snap = self._vm_ref(snapshot_id) return self._snapshot_info(snap) def snapshot_revert(self, guest_id, snapshot_id): self.connection.xenapi.VM.revert(self._vm_ref(snapshot_id))", "[] for sr in self.connection.xenapi.SR.get_all_records().values(): if sr[\"PBDs\"] is not None and len(sr[\"PBDs\"]) >", "guestdata[\"cpus\"] if not isinstance(vcpus,dict): vcpus = { \"vcpus_at_startup\" : vcpus, \"vcpus_max\" : self.connection.xenapi.VM.get_VCPUs_max(vm_ref)", "importing\") task_ref = self.connection.xenapi.task.create( \"import vm\", \"import job\" ) path = \"/import?session_id=%s&task_id=%s&sr_id=%s\" %", "description, other_config={}): return self.connection.xenapi.network.create({\"name_label\": name, \"name_description\": description, \"other_config\": other_config}) def network_vlan_create(self, name, description,", "(not vm.get('is_a_snapshot')) and (not vm.get('is_a_template')): guests.append({'id': vm.get('uuid')}) return guests def guest_info(self, guest_id): vm", "self.format_for.host( host['uuid'], host['name_label'], host['address'] ) ) def _storage_info(self, sr_ref): sr = self.connection.xenapi.SR.get_record(sr_ref) return(", "use this file except in compliance with the License. 
# You may obtain", "return self._network_interface_info(vif_ref) def network_interface_delete(self, guest_id, network_interface_id): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id)", "if not snapshot_name: snapshot_name = str(datetime.datetime.now()) snap = self.connection.xenapi.VM.snapshot( self._vm_ref(guest_id), snapshot_name ) return", "len(net_ref) == 0: raise EntityNotFound(\"NetworkInterface\", \"Unknown network: %s\" % name) return net_ref[0] def", "== 0: raise EntityNotFound(\"NetworkInterface\", \"Unknown network: %s\" % name) return net_ref[0] def _network_get_pifs(self,", "if not isinstance(memory,dict): memory = { \"memory_target\" : memory , \"memory_static_min\" : memory,", "= self._vm_ref(guest_id) disks = self.get_disks(vm_ref) return [self._disk_info(d) for d in disks] def disk_create(self,", "network_interface_create(self, guest_id, data): \"\"\" Data should contain at least a network key: {\"network\":", "self.connect() else: raise error def logout(self): self.connection.xenapi.session.logout() def pool_info(self): used_memory = 0 for", "response_size = response.getheader(\"Content-Length\") return (response, response_size) def disk_list(self, guest_id): vm_ref = self._vm_ref(guest_id) disks", ") try: self.connection.xenapi.login_with_password( self.poolinfo.get(\"username\"), self.poolinfo.get(\"password\") ) except Exception, error: # If host is", "vif_rec[\"MAC\"] == network_interface_id: return vif_ref entity_info = \"%s - on Guest\" % (network_interface_id)", "guest_start(self, guest_id): return self.connection.xenapi.VM.start( self._vm_ref(guest_id), False, False ) def guest_reboot(self, guest_id, force=False): vm_ref", "guest_id, network_interface_id, data): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) vif_record = self.connection.xenapi.VIF.get_record(vif_ref)", "> int(vcpus_max): self.connection.xenapi.VM.set_VCPUs_max(vm_ref, 
vcpus_at_startup) else: self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_max) self.connection.xenapi.VM.set_VCPUs_at_startup(vm_ref, vcpus_at_startup) if \"vcpus_number_live\" in guestdata:", "for the specific language governing permissions and # limitations under the License. #", "snapshot_create(self, guest_id, snapshot_name=None): if not snapshot_name: snapshot_name = str(datetime.datetime.now()) snap = self.connection.xenapi.VM.snapshot( self._vm_ref(guest_id),", "self.connection.xenapi.VM.set_ha_restart_priority(vm_ref, \"\") if \"template\" in guestdata: is_template = self.connection.xenapi.VM.get_is_a_template(vm_ref) if guestdata[\"template\"] ^ is_template:", "vif_record[\"ipv4_allowed\"] != data[\"ipv4_allowed\"]: new_attributes[\"ipv4_allowed\"] = data[\"ipv4_allowed\"] if \"ipv6_allowed\" in data and vif_record[\"ipv6_allowed\"] !=", "task_ref, storage_ref ) try: conn = httplib.HTTPConnection(master) conn.request( \"PUT\", path, vm_stream, {\"Content-Length\": vm_size}", "False, \"other_config\": {}, \"xenstore_data\": {}, \"sm_config\": {}, \"tags\": [] }) if data.get(\"storage_id\"): raise", "False ) def guest_clone(self, guest_id, data): vm = self.connection.xenapi.VM.clone( self._vm_ref(guest_id), data[\"name\"] ) return", "guest_id, force=False): if force: return self.connection.xenapi.VM.hard_shutdown( self._vm_ref(guest_id) ) else: return self.connection.xenapi.VM.clean_shutdown( self._vm_ref(guest_id) )", "net_refs: new_attributes[\"network\"] = net_refs if \"locking_mode\" in data and vif_record[\"locking_mode\"] != data[\"locking_mode\"]: new_attributes[\"locking_mode\"]", "net_refs = self.connection.xenapi.network.get_all() ret = [] for net in net_refs: ret.append({\"id\": net}) return", "guestdata: if guestdata[\"ha_enabled\"]: self.connection.xenapi.VM.set_ha_restart_priority( vm_ref, \"best-effort\" ) else: self.connection.xenapi.VM.set_ha_restart_priority(vm_ref, \"\") if \"template\" in", "\"\"\" vm_ref = self._vm_ref(guest_id) 
devices = [] for vif in self.connection.xenapi.VM.get_VIFs(vm_ref): devices.append(int(self.connection.xenapi.VIF.get_device(vif))) next_device", "self._network_ref(data[\"network\"]) vif_ref = self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref) except: pass return self._network_interface_info(vif_ref) def network_interface_info(self, guest_id,", "tools_up_to_date, ip, self.state_translation[vm.get('power_state')], host ) ) def _disk_info(self, disk_rec): return( self.format_for.disk( disk_rec.get('userdevice'), disk_rec.get('name_label'),", "response.status response.read() except socket.error as err: if err.errno == errno.ECONNRESET: LOG.warning(\"error=CONNRESET action=import message='BUG?'\")", "def __init__(self, poolinfo): self.connection = False self.poolinfo = poolinfo self.format_for = Formatter() self.connect()", "{\"network\": \"THE NETWORK NAME\"} \"\"\" vm_ref = self._vm_ref(guest_id) devices = [] for vif", "\"STARTED\", \"Halted\": \"STOPPED\", \"Suspended\": \"PAUSED\" } def __init__(self, poolinfo): self.connection = False self.poolinfo", "storage_info(self, storage_id): sr_ref = self.connection.xenapi.SR.get_by_uuid(storage_id) return self._storage_info(sr_ref) def guest_list(self): guests = [] for", "if guestdata[\"ha_enabled\"]: self.connection.xenapi.VM.set_ha_restart_priority( vm_ref, \"best-effort\" ) else: self.connection.xenapi.VM.set_ha_restart_priority(vm_ref, \"\") if \"template\" in guestdata:", "+= int(m_rec['memory_total']) pool_rec = self.connection.xenapi.pool.get_all_records().values()[0] master_rec = self.connection.xenapi.host.get_record(pool_rec[\"master\"]) return ( self.format_for.pool( used_memory /", "= str(error).split(\"'\")[3] self.connect() else: raise error def logout(self): self.connection.xenapi.session.logout() def pool_info(self): used_memory =", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "in devices: next_device = device break vif_record = { 
\"VM\": vm_ref, \"device\": str(next_device),", "total_memory += int(m_rec['memory_total']) pool_rec = self.connection.xenapi.pool.get_all_records().values()[0] master_rec = self.connection.xenapi.host.get_record(pool_rec[\"master\"]) return ( self.format_for.pool( used_memory", "guest_id): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) iso_ref = self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] if iso_ref ==", "guest_export(self, guest_id): vm_ref = self._vm_ref(guest_id) session_ref = self.connection._session # FIXME: get real master", "== network_interface_id: return vif_ref entity_info = \"%s - on Guest\" % (network_interface_id) raise", "self.poolinfo.get(\"password\") ) except Exception, error: # If host is slave, connect to master", "parameters) if \"ha_enabled\" in guestdata: if guestdata[\"ha_enabled\"]: self.connection.xenapi.VM.set_ha_restart_priority( vm_ref, \"best-effort\" ) else: self.connection.xenapi.VM.set_ha_restart_priority(vm_ref,", "if int(vcpus_at_startup) > int(vcpus_max): self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_at_startup) else: self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_max) self.connection.xenapi.VM.set_VCPUs_at_startup(vm_ref, vcpus_at_startup) if \"vcpus_number_live\"", "devices = [] for vif in self.connection.xenapi.VM.get_VIFs(vm_ref): devices.append(int(self.connection.xenapi.VIF.get_device(vif))) next_device = max(devices) + 1", "= self.connection.xenapi.network.get_record( vif_rec[\"network\"] ) return( self.format_for.network_interface( vif_rec[\"MAC\"], vif_rec[\"device\"], vif_rec[\"MAC\"], network_rec[\"name_label\"], vif_rec[\"locking_mode\"], vif_rec[\"ipv4_allowed\"], vif_rec[\"ipv6_allowed\"],", "vbd_ref in vm['VBDs']: vbd = self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"] == \"CD\": return vbd_ref def", "/ (1024 * 1024 * 1024), int(sr['physical_size']) / (1024 * 1024 * 1024)", "break vif_record = { \"VM\": vm_ref, \"device\": str(next_device), 
\"MAC_autogenerated\": True, \"MAC\": \"\", \"MTU\":", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass self.connection.xenapi.VIF.destroy(vif_ref) def snapshot_list(self, guest_id):", "guest_id): return self.connection.xenapi.VM.get_tags(self._vm_ref(guest_id)) def tag_create(self, guest_id, tag_name): vm_ref = self._vm_ref(guest_id) self.connection.xenapi.VM.add_tags(vm_ref, tag_name) return", "{\"name_label\": self.connection.xenapi.network.get_name_label(net_ref), \"bridge\": self.connection.xenapi.network.get_bridge(net_ref), \"name_description\": self.connection.xenapi.network.get_name_description(net_ref), \"other_config\": self.connection.xenapi.network.get_other_config(net_ref)} def _network_ref(self, name): net_ref =", "response_size) def disk_list(self, guest_id): vm_ref = self._vm_ref(guest_id) disks = self.get_disks(vm_ref) return [self._disk_info(d) for", "= self.connection.xenapi.network.get_all() ret = [] for net in net_refs: ret.append({\"id\": net}) return ret", "except: pass self.connection.xenapi.VIF.destroy(vif_ref) def snapshot_list(self, guest_id): snaps = [ self._snapshot_info(s) for s in", "self._vm_ref(guest_id) vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref) return [self._network_interface_info(n) for n in vif_refs] def network_interface_create(self, guest_id,", "= conn.getresponse() response.status response.read() except socket.error as err: if err.errno == errno.ECONNRESET: LOG.warning(\"error=CONNRESET", "if \"size\" in data: new_disk_size = int(data[\"size\"]) new_disk_size *= 1024 * 1024 *", "other_config={}): net_ref = self._network_create(name, description, other_config) pif_ref = self._network_get_pifs(from_network) ref = self.connection.xenapi.pool.create_VLAN_from_PIF(pif_ref[0], net_ref,", "# @author: <NAME> (PotHix), Locaweb. 
from simplestack.utils import XenAPI from simplestack.exceptions import FeatureNotImplemented,", "def guest_update(self, guest_id, guestdata): vm_ref = self._vm_ref(guest_id) if \"name\" in guestdata: self.connection.xenapi.VM.set_name_label(vm_ref, guestdata[\"name\"])", "- int(record[\"virtual_allocation\"]) ) if free_space > max_free_space: max_free_space = free_space storage_ref = sr_ref", "= Formatter() self.connect() def connect(self): self.connection = XenAPI.Session( \"https://%s/\" % self.poolinfo.get(\"api_server\") ) try:", ": memory, \"memory_static_max\" : memory } memory_target = str(int(memory[\"memory_target\"])<<20) memory_static_min = str(int(memory[\"memory_static_min\"])<<20) memory_static_max", "(network_interface_id) raise EntityNotFound(\"NetworkInterface\", entity_info) def _vm_ref(self, uuid): try: return self.connection.xenapi.VM.get_by_uuid(uuid) except: LOG.warning(\"uuid=%s action=not_found\"", "snapshot.get('uuid'), snapshot.get('name_label') ) ) def _network_interface_info(self, vif_ref): vif_rec = {'locking_mode': None, 'ipv4_allowed': None,", "vm %s\" % guest_id, \"export job\" ) path = \"/export?session_id=%s&task_id=%s&ref=%s\" % ( session_ref,", "network_rec = self.connection.xenapi.network.get_record( vif_rec[\"network\"] ) return( self.format_for.network_interface( vif_rec[\"MAC\"], vif_rec[\"device\"], vif_rec[\"MAC\"], network_rec[\"name_label\"], vif_rec[\"locking_mode\"], vif_rec[\"ipv4_allowed\"],", "= self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) null_ref = 'OpaqueRef:NULL' if self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] != null_ref: self.connection.xenapi.VBD.eject(cd_ref)", "met_ref = self.connection.xenapi.host.get_metrics(host_ref) m_rec = self.connection.xenapi.host_metrics.get_record(met_ref) total_memory += int(m_rec['memory_total']) pool_rec = self.connection.xenapi.pool.get_all_records().values()[0] master_rec", "data: new_disk_size = int(data[\"size\"]) new_disk_size *= 1024 * 1024 * 1024 
self.connection.xenapi.VDI.resize(disk_rec[\"ref\"], str(new_disk_size))", "int(disk_rec.get('virtual_size')) / (1024 * 1024 * 1024), disk_rec.get(\"uuid\") ) ) def _snapshot_info(self, snapshot_ref):", "vm_ref, \"BIOS order\" ) if \"hdd\" in guestdata: disk = self.get_disks(vm_ref)[-1] disks_size =", "with the License. # You may obtain a copy of the License at", "guest_id, \"export job\" ) path = \"/export?session_id=%s&task_id=%s&ref=%s\" % ( session_ref, task_ref, vm_ref )", "= vdi_ref self.connection.xenapi.VBD.create(vbd_rec) disk_rec = self._disk_rec(vm_ref, next_device) return self._disk_info(disk_rec) def disk_info(self, guest_id, disk_id):", "return self.connection.xenapi.VM.get_tags(self._vm_ref(guest_id)) def tag_create(self, guest_id, tag_name): vm_ref = self._vm_ref(guest_id) self.connection.xenapi.VM.add_tags(vm_ref, tag_name) return self.tag_list(guest_id)", "guest_import(self, vm_stream, vm_size, storage_id=None): session_ref = self.connection._session master = self.poolinfo.get(\"api_server\") storage_ref = None", "host['uuid'], host['name_label'], host['address'] ) ) def _storage_info(self, sr_ref): sr = self.connection.xenapi.SR.get_record(sr_ref) return( self.format_for.storage(", "\"ipv4_allowed\" in data and vif_record[\"ipv4_allowed\"] != data[\"ipv4_allowed\"]: new_attributes[\"ipv4_allowed\"] = data[\"ipv4_allowed\"] if \"ipv6_allowed\" in", "ip, self.state_translation[vm.get('power_state')], host ) ) def _disk_info(self, disk_rec): return( self.format_for.disk( disk_rec.get('userdevice'), disk_rec.get('name_label'), disk_rec.get('userdevice'),", "permissions and # limitations under the License. # # @author: <NAME>, Locaweb. 
#", "description, from_network, vlan, other_config={}): net_ref = self._network_create(name, description, other_config) pif_ref = self._network_get_pifs(from_network) ref", "= \"%s - on Guest\" % (disk_id) raise EntityNotFound(\"Disk\", entity_info) def _network_interface_ref(self, vm_ref,", "if guestdata[\"paravirtualized\"]: if guestdata[\"paravirtualized\"] is True: pv_args = \"-- quiet console=hvc0\" else: pv_args", ") ) def _delete_vm(self, vm_id): vm_ref = self._vm_ref(vm_id) if not vm_ref: return for", "= self.get_disks_size(vm_ref) hdd = guestdata.get(\"hdd\") * 1024 * 1024 * 1024 new_disk_size =", "if \"ratelimit\" in data: if data[\"ratelimit\"]: # kbps in xen is actually kBps", "return ( self.format_for.pool( used_memory / (1024 * 1024), total_memory / (1024 * 1024),", "\"type\": \"Disk\", \"unpluggable\": False, \"empty\": False, \"other_config\": {}, \"qos_algorithm_type\": \"\", \"qos_algorithm_params\": {} }", "session_ref, task_ref, vm_ref ) conn = httplib.HTTPConnection(master) conn.request(\"GET\", path) response = conn.getresponse() response_size", "self._network_interface_info(vif_ref) def network_interface_info(self, guest_id, network_interface_id): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) return", "law or agreed to in writing, software # distributed under the License is", "int(vcpus_max): self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_at_startup) else: self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_max) self.connection.xenapi.VM.set_VCPUs_at_startup(vm_ref, vcpus_at_startup) if \"vcpus_number_live\" in guestdata: self.connection.xenapi.VM.set_VCPUs_number_live(vm_ref,", "= \"%s - on Guest\" % (network_interface_id) raise EntityNotFound(\"NetworkInterface\", entity_info) def _vm_ref(self, uuid):", "vm_size, storage_id=None): session_ref = self.connection._session master = self.poolinfo.get(\"api_server\") storage_ref = None if storage_id:", "for vbd_ref in vm['VBDs']: vbd 
= self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"] == \"CD\": return vbd_ref", "except Exception, error: # If host is slave, connect to master if 'HOST_IS_SLAVE'", "str(int(memory[\"memory_target\"])<<20) memory_static_min = str(int(memory[\"memory_static_min\"])<<20) memory_static_max = str(int(memory[\"memory_static_max\"])<<20) self.connection.xenapi.VM.set_memory_limits( vm_ref, memory_static_min, memory_static_max, memory_target, memory_target", "if err.errno == errno.ECONNRESET: LOG.warning(\"error=CONNRESET action=import message='BUG?'\") else: raise task_rec = self.connection.xenapi.task.get_record(task_ref) vm_ref", "guest_delete(self, guest_id): self._delete_vm(guest_id) def guest_import(self, vm_stream, vm_size, storage_id=None): session_ref = self.connection._session master =", ") def _disk_info(self, disk_rec): return( self.format_for.disk( disk_rec.get('userdevice'), disk_rec.get('name_label'), disk_rec.get('userdevice'), int(disk_rec.get('virtual_size')) / (1024 *", "memory_target, memory_target ) if \"memory_target_live\" in guestdata: memory_target = str(int(guestdata[\"memory_target_live\"])<<20) self.connection.xenapi.VM.set_memory_dynamic_range( vm_ref, memory_target,", "if \"ipv4_allowed\" in data and vif_record[\"ipv4_allowed\"] != data[\"ipv4_allowed\"]: new_attributes[\"ipv4_allowed\"] = data[\"ipv4_allowed\"] if \"ipv6_allowed\"", "def guest_info(self, guest_id): vm = self._vm_ref(guest_id) return self._vm_info(vm) def guest_shutdown(self, guest_id, force=False): if", "net in net_refs: ret.append({\"id\": net}) return ret def network_info(self, net_ref): return {\"name_label\": self.connection.xenapi.network.get_name_label(net_ref),", "self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_at_startup) else: self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_max) self.connection.xenapi.VM.set_VCPUs_at_startup(vm_ref, vcpus_at_startup) if \"vcpus_number_live\" in guestdata: 
self.connection.xenapi.VM.set_VCPUs_number_live(vm_ref, str(guestdata[\"vcpus_number_live\"]))", "media_data[\"name\"] != \"\": self.media_unmount(guest_id) iso_ref = self.connection.xenapi.VDI.get_by_name_label( media_data[\"name\"] )[0] self.connection.xenapi.VBD.insert(cd_ref, iso_ref) else: self.media_unmount(guest_id)", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "host = self.connection.xenapi.host.get_record(host_ref) return( self.format_for.host( host['uuid'], host['name_label'], host['address'] ) ) def _storage_info(self, sr_ref):", "= response.getheader(\"Content-Length\") return (response, response_size) def disk_list(self, guest_id): vm_ref = self._vm_ref(guest_id) disks =", "= self.connection.xenapi.SR.get_record(sr_ref) return( self.format_for.storage( sr['uuid'], sr['name_label'], sr['type'], int(sr['physical_utilisation']) / (1024 * 1024 *", "+ 1 for device in range(next_device): if device not in devices: next_device =", "= self.connection.xenapi.task.create( \"import vm\", \"import job\" ) path = \"/import?session_id=%s&task_id=%s&sr_id=%s\" % ( session_ref,", "kBps rate = data[\"ratelimit\"] / (8 * 1024) self.connection.xenapi.VIF.set_qos_algorithm_type( vif_ref, \"ratelimit\" ) self.connection.xenapi.VIF.set_qos_algorithm_params(", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "in self.connection.xenapi.VM.get_snapshots( self._vm_ref(guest_id) ) ] return snaps def snapshot_create(self, guest_id, snapshot_name=None): if not", "sorted(disks, key=lambda vdi: int(vdi['userdevice'])) def get_disks_size(self, vm_ref): size = 0 for vdi in", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "[] vm = self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref in vm['VBDs']: vbd = self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"]", 
"self._delete_disks(vm_ref) self.connection.xenapi.VM.destroy(vm_ref) def _cd_ref(self, vm_ref): vm = self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref in vm['VBDs']: vbd", "storage space left for importing\") task_ref = self.connection.xenapi.task.create( \"import vm\", \"import job\" )", "= self.connection.xenapi.task.get_record(task_ref) vm_ref = re.sub(r'<.*?>', \"\", task_rec[\"result\"]) self.connection.xenapi.task.destroy(task_ref) return self._vm_info(vm_ref) def guest_export(self, guest_id):", "= disks[0][\"SR\"] if \"name\" in data: vdi_rec[\"name_label\"] = data[\"name\"] vdi_rec[\"name_description\"] = data[\"name\"] vdi_ref", "data and vif_record[\"locking_mode\"] != data[\"locking_mode\"]: new_attributes[\"locking_mode\"] = data[\"locking_mode\"] if \"ipv4_allowed\" in data and", "\"name_label\": \"New Disk\", \"name_description\": \"Simplestack generated disk\", \"virtual_size\": str(data[\"size\"] * 1024 * 1024", "guests = [] for vm in self.connection.xenapi.VM.get_all_records().values(): if (not vm.get('is_a_snapshot')) and (not vm.get('is_a_template')):", "\"name_description\": description, \"other_config\": other_config}) def network_vlan_create(self, name, description, from_network, vlan, other_config={}): net_ref =", "= self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) return self._network_interface_info(vif_ref) def network_interface_update(self, guest_id, network_interface_id, data):", "vm.get('uuid')}) return guests def guest_info(self, guest_id): vm = self._vm_ref(guest_id) return self._vm_info(vm) def guest_shutdown(self,", ") return self._vm_info(vm) def guest_update(self, guest_id, guestdata): vm_ref = self._vm_ref(guest_id) if \"name\" in", "def _network_ref(self, name): net_ref = self.connection.xenapi.network.get_by_name_label(name) if len(net_ref) == 0: raise EntityNotFound(\"NetworkInterface\", \"Unknown", "self.connection.xenapi.VIF.unplug(vif_ref) except: pass if \"ratelimit\" in 
data: if data[\"ratelimit\"]: # kbps in xen", "vif_rec[\"ipv6_allowed\"], vif_rec[\"qos_algorithm_params\"] ) ) def _delete_vm(self, vm_id): vm_ref = self._vm_ref(vm_id) if not vm_ref:", "name, description, from_network, vlan, other_config={}): net_ref = self._network_create(name, description, other_config) pif_ref = self._network_get_pifs(from_network)", "= \"/import?session_id=%s&task_id=%s&sr_id=%s\" % ( session_ref, task_ref, storage_ref ) try: conn = httplib.HTTPConnection(master) conn.request(", "data): vm = self.connection.xenapi.VM.clone( self._vm_ref(guest_id), data[\"name\"] ) return self._vm_info(vm) def guest_update(self, guest_id, guestdata):", ") self.connection.xenapi.VIF.set_qos_algorithm_params( vif_ref, {\"kbps\": str(rate)} ) else: self.connection.xenapi.VIF.set_qos_algorithm_type(vif_ref, \"\") return self._network_interface_info(vif_ref) def network_interface_delete(self,", "memory } memory_target = str(int(memory[\"memory_target\"])<<20) memory_static_min = str(int(memory[\"memory_static_min\"])<<20) memory_static_max = str(int(memory[\"memory_static_max\"])<<20) self.connection.xenapi.VM.set_memory_limits( vm_ref,", "int(vm.get('memory_static_max')) / (1024 * 1024), self.get_disks_size(vm_ref) / (1024 * 1024 * 1024), vm[\"PV_args\"],", "for sr in self.connection.xenapi.SR.get_all_records().values(): if sr[\"PBDs\"] is not None and len(sr[\"PBDs\"]) > 0:", "network_interface_id) vif_record = self.connection.xenapi.VIF.get_record(vif_ref) new_attributes = {} if \"network\" in data: net_refs =", "% ( session_ref, task_ref, storage_ref ) try: conn = httplib.HTTPConnection(master) conn.request( \"PUT\", path,", "self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] != null_ref: self.connection.xenapi.VBD.eject(cd_ref) def media_info(self, guest_id): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref)", ") def guest_reboot(self, guest_id, force=False): vm_ref = self._vm_ref(guest_id) if force: return 
self.connection.xenapi.VM.hard_reboot(vm_ref) else:", "_vm_info(self, vm_ref): vm = self.connection.xenapi.VM.get_record(vm_ref) tools_up_to_date = None ip = None if vm[\"guest_metrics\"]", "'version': master_rec.get('software_version', {}).get('product_version') } ) ) def host_list(self): hosts = [] for h", "= self._disk_rec(vm_ref, disk_id) return self._disk_info(disk_rec) def disk_update(self, guest_id, disk_id, data): vm_ref = self._vm_ref(guest_id)", "if \"memory_target_live\" in guestdata: memory_target = str(int(guestdata[\"memory_target_live\"])<<20) self.connection.xenapi.VM.set_memory_dynamic_range( vm_ref, memory_target, memory_target ) if", "NAME\"} \"\"\" vm_ref = self._vm_ref(guest_id) devices = [] for vif in self.connection.xenapi.VM.get_VIFs(vm_ref): devices.append(int(self.connection.xenapi.VIF.get_device(vif)))", "for host_ref in self.connection.xenapi.host.get_all(): met_ref = self.connection.xenapi.host.get_metrics(host_ref) m_rec = self.connection.xenapi.host_metrics.get_record(met_ref) total_memory += int(m_rec['memory_total'])", "task_rec[\"result\"]) self.connection.xenapi.task.destroy(task_ref) return self._vm_info(vm_ref) def guest_export(self, guest_id): vm_ref = self._vm_ref(guest_id) session_ref = self.connection._session", "simplestack.exceptions import FeatureNotImplemented, EntityNotFound from simplestack.hypervisors.base import SimpleStack from simplestack.presenters.formatter import Formatter import", "connect(self): self.connection = XenAPI.Session( \"https://%s/\" % self.poolinfo.get(\"api_server\") ) try: self.connection.xenapi.login_with_password( self.poolinfo.get(\"username\"), self.poolinfo.get(\"password\") )", "vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref) return [self._network_interface_info(n) for n in vif_refs] def network_interface_create(self, guest_id, data):", "try: self.connection.xenapi.VIF.plug(vif_ref) except: pass if \"active\" in data: if data[\"active\"]: try: self.connection.xenapi.VIF.plug(vif_ref) 
except:", "vm_ref, disk_id): disk_id = str(disk_id) for disk in self.get_disks(vm_ref): if disk[\"userdevice\"] == disk_id:", "= guestdata[\"paravirtualized\"] self.connection.xenapi.VM.set_HVM_boot_policy(vm_ref, \"\") self.connection.xenapi.VM.set_PV_args(vm_ref, pv_args) else: self.connection.xenapi.VM.set_PV_args(vm_ref, \"\") self.connection.xenapi.VM.set_HVM_boot_params( vm_ref, {\"order\": \"dc\"}", "for importing\") task_ref = self.connection.xenapi.task.create( \"import vm\", \"import job\" ) path = \"/import?session_id=%s&task_id=%s&sr_id=%s\"", "= self.poolinfo.get(\"api_server\") storage_ref = None if storage_id: storage_ref = self.connection.xenapi.SR.get_by_uuid(storage_id) else: storages =", "{} } vdi_rec = ({ \"name_label\": \"New Disk\", \"name_description\": \"Simplestack generated disk\", \"virtual_size\":", "+= int(vdi[\"virtual_size\"]) return size def _disk_rec(self, vm_ref, disk_id): disk_id = str(disk_id) for disk", "try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass self.connection.xenapi.VIF.destroy(vif_ref) vif_ref = self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref) except: pass if", "get_record(vm[\"guest_metrics\"]) tools_up_to_date = guest_metrics[\"PV_drivers_up_to_date\"] if \"0/ip\" in guest_metrics[\"networks\"].keys(): ip = guest_metrics[\"networks\"][\"0/ip\"] host =", "the specific language governing permissions and # limitations under the License. 
# #", "= self._vm_ref(guest_id) self.connection.xenapi.VM.remove_tags(vm_ref, tag_name) def get_disks(self, vm_ref): disks = [] vm = self.connection.xenapi.VM.get_record(vm_ref)", "self._vm_ref(guest_id) devices = [] for vbd in self.connection.xenapi.VM.get_VBDs(vm_ref): devices.append(int(self.connection.xenapi.VBD.get_userdevice(vbd))) next_device = max(devices) +", "disk_id: return disk entity_info = \"%s - on Guest\" % (disk_id) raise EntityNotFound(\"Disk\",", "vcpus = guestdata[\"cpus\"] if not isinstance(vcpus,dict): vcpus = { \"vcpus_at_startup\" : vcpus, \"vcpus_max\"", "vif_record = { \"VM\": vm_ref, \"device\": str(next_device), \"MAC_autogenerated\": True, \"MAC\": \"\", \"MTU\": \"0\",", "if \"network\" in data: vif_record[\"network\"] = self._network_ref(data[\"network\"]) vif_ref = self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref) except:", "device break vif_record = { \"VM\": vm_ref, \"device\": str(next_device), \"MAC_autogenerated\": True, \"MAC\": \"\",", "\"active\" in data: if data[\"active\"]: try: self.connection.xenapi.VIF.plug(vif_ref) except: pass else: try: self.connection.xenapi.VIF.unplug(vif_ref) except:", "= self.connection.xenapi.task.create( \"export vm %s\" % guest_id, \"export job\" ) path = \"/export?session_id=%s&task_id=%s&ref=%s\"", "{} if \"network\" in data: net_refs = self._network_ref(data[\"network\"]) if vif_record[\"network\"] != net_refs: new_attributes[\"network\"]", "self.connection.xenapi.VM.start( self._vm_ref(guest_id), False, False ) def guest_reboot(self, guest_id, force=False): vm_ref = self._vm_ref(guest_id) if", "new_disk_size = hdd - disks_size + int(disk[\"virtual_size\"]) self.connection.xenapi.VDI.resize(disk[\"ref\"], str(new_disk_size)) return self._vm_info(self._vm_ref(guest_id)) def guest_delete(self,", "if \"memory\" in guestdata: memory = guestdata[\"memory\"] if not isinstance(memory,dict): memory = {", "pass self.connection.xenapi.VIF.destroy(vif_ref) def 
snapshot_list(self, guest_id): snaps = [ self._snapshot_info(s) for s in self.connection.xenapi.VM.get_snapshots(", "self._vm_ref(guest_id) disk_rec = self._disk_rec(vm_ref, disk_id) if \"name\" in data: self.connection.xenapi.VDI.set_name_label( disk_rec[\"ref\"], data[\"name\"] )", "vm_stream, vm_size, storage_id=None): session_ref = self.connection._session master = self.poolinfo.get(\"api_server\") storage_ref = None if", "vm in self.connection.xenapi.VM.get_all_records().values(): if (not vm.get('is_a_snapshot')) and (not vm.get('is_a_template')): guests.append({'id': vm.get('uuid')}) return guests", "= self.get_disks(vm_ref)[-1] disks_size = self.get_disks_size(vm_ref) hdd = guestdata.get(\"hdd\") * 1024 * 1024 *", "self._disk_info(disk_rec) def media_mount(self, guest_id, media_data): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) if media_data.get(\"name\")", "pass else: try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass if \"ratelimit\" in data: if data[\"ratelimit\"]: #", "return( self.format_for.snapshot( snapshot.get('uuid'), snapshot.get('name_label') ) ) def _network_interface_info(self, vif_ref): vif_rec = {'locking_mode': None,", "\"ipv6_allowed\" in data and vif_record[\"ipv6_allowed\"] != data[\"ipv6_allowed\"]: new_attributes[\"ipv6_allowed\"] = data[\"ipv6_allowed\"] if len(new_attributes) !=", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "\"xenstore_data\": {}, \"sm_config\": {}, \"tags\": [] }) if data.get(\"storage_id\"): raise FeatureNotImplemented() else: disks", "vcpus_max = str(vcpus[\"vcpus_max\"]) if int(vcpus_at_startup) > int(vcpus_max): self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_at_startup) else: self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_max) self.connection.xenapi.VM.set_VCPUs_at_startup(vm_ref,", "state_translation = { \"Running\": \"STARTED\", \"Halted\": \"STOPPED\", \"Suspended\": \"PAUSED\" } def __init__(self, poolinfo):", "for vbd in 
self.connection.xenapi.VM.get_VBDs(vm_ref): devices.append(int(self.connection.xenapi.VBD.get_userdevice(vbd))) next_device = max(devices) + 1 for device in", "* 1024), int(sr['physical_size']) / (1024 * 1024 * 1024) ) ) def _vm_info(self,", "self.connection.xenapi.network.get_by_name_label(name) if len(net_ref) == 0: raise EntityNotFound(\"NetworkInterface\", \"Unknown network: %s\" % name) return", "data: if data[\"active\"]: try: self.connection.xenapi.VIF.plug(vif_ref) except: pass else: try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass if", "= \"/export?session_id=%s&task_id=%s&ref=%s\" % ( session_ref, task_ref, vm_ref ) conn = httplib.HTTPConnection(master) conn.request(\"GET\", path)", "for d in disks] def disk_create(self, guest_id, data): vm_ref = self._vm_ref(guest_id) devices =", "self.connection.xenapi.VDI.get_by_name_label( media_data[\"name\"] )[0] self.connection.xenapi.VBD.insert(cd_ref, iso_ref) else: self.media_unmount(guest_id) def media_unmount(self, guest_id): vm_ref = self._vm_ref(guest_id)", "= device break vif_record = { \"VM\": vm_ref, \"device\": str(next_device), \"MAC_autogenerated\": True, \"MAC\":", "data: vif_record[\"network\"] = self._network_ref(data[\"network\"]) vif_ref = self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref) except: pass return self._network_interface_info(vif_ref)", "self.connection.xenapi.VM.set_PV_args(vm_ref, \"\") self.connection.xenapi.VM.set_HVM_boot_params( vm_ref, {\"order\": \"dc\"} ) self.connection.xenapi.VM.set_HVM_boot_policy( vm_ref, \"BIOS order\" ) if", "import socket import httplib import logging LOG = logging.getLogger('simplestack.hypervisors.xen') class Stack(SimpleStack): state_translation =", "host ) ) def _disk_info(self, disk_rec): return( self.format_for.disk( disk_rec.get('userdevice'), disk_rec.get('name_label'), disk_rec.get('userdevice'), int(disk_rec.get('virtual_size')) /", "return (response, response_size) def disk_list(self, 
guest_id): vm_ref = self._vm_ref(guest_id) disks = self.get_disks(vm_ref) return", "memory_static_max = str(int(memory[\"memory_static_max\"])<<20) self.connection.xenapi.VM.set_memory_limits( vm_ref, memory_static_min, memory_static_max, memory_target, memory_target ) if \"memory_target_live\" in", "self.connection.xenapi.VM.set_memory_dynamic_range( vm_ref, memory_target, memory_target ) if \"cpus\" in guestdata: vcpus = guestdata[\"cpus\"] if", "( int(record[\"physical_size\"]) - int(record[\"virtual_allocation\"]) ) if free_space > max_free_space: max_free_space = free_space storage_ref", "data[\"ipv4_allowed\"]: new_attributes[\"ipv4_allowed\"] = data[\"ipv4_allowed\"] if \"ipv6_allowed\" in data and vif_record[\"ipv6_allowed\"] != data[\"ipv6_allowed\"]: new_attributes[\"ipv6_allowed\"]", "= int(data[\"size\"]) new_disk_size *= 1024 * 1024 * 1024 self.connection.xenapi.VDI.resize(disk_rec[\"ref\"], str(new_disk_size)) disk_rec =", "pool_rec = self.connection.xenapi.pool.get_all_records().values()[0] master_rec = self.connection.xenapi.host.get_record(pool_rec[\"master\"]) return ( self.format_for.pool( used_memory / (1024 *", "return sorted(disks, key=lambda vdi: int(vdi['userdevice'])) def get_disks_size(self, vm_ref): size = 0 for vdi", "try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass self.connection.xenapi.VIF.destroy(vif_ref) def snapshot_list(self, guest_id): snaps = [ self._snapshot_info(s) for", "2013 Locaweb. # All Rights Reserved. # # Licensed under the Apache License,", "this file except in compliance with the License. 
# You may obtain a", "as err: if err.errno == errno.ECONNRESET: LOG.warning(\"error=CONNRESET action=import message='BUG?'\") else: raise task_rec =", "disk_rec = self._disk_rec(vm_ref, disk_id) if \"name\" in data: self.connection.xenapi.VDI.set_name_label( disk_rec[\"ref\"], data[\"name\"] ) self.connection.xenapi.VDI.set_name_description(", "in guestdata: if guestdata[\"paravirtualized\"]: if guestdata[\"paravirtualized\"] is True: pv_args = \"-- quiet console=hvc0\"", "= self.connection.xenapi.host.get_name_label( vm[\"resident_on\"] ) return( self.format_for.guest( vm.get('uuid'), vm.get('name_label'), int(vm.get('VCPUs_at_startup')), int(vm.get('memory_static_max')) / (1024 *", "vcpus_max) self.connection.xenapi.VM.set_VCPUs_at_startup(vm_ref, vcpus_at_startup) if \"vcpus_number_live\" in guestdata: self.connection.xenapi.VM.set_VCPUs_number_live(vm_ref, str(guestdata[\"vcpus_number_live\"])) if \"vcpu_settings\" in guestdata:", "net_refs if \"locking_mode\" in data and vif_record[\"locking_mode\"] != data[\"locking_mode\"]: new_attributes[\"locking_mode\"] = data[\"locking_mode\"] if", "\"\": self.media_unmount(guest_id) iso_ref = self.connection.xenapi.VDI.get_by_name_label( media_data[\"name\"] )[0] self.connection.xenapi.VBD.insert(cd_ref, iso_ref) else: self.media_unmount(guest_id) def media_unmount(self,", "in data and vif_record[\"locking_mode\"] != data[\"locking_mode\"]: new_attributes[\"locking_mode\"] = data[\"locking_mode\"] if \"ipv4_allowed\" in data", "self.connection.xenapi.VM.set_VCPUs_at_startup(vm_ref, vcpus_at_startup) if \"vcpus_number_live\" in guestdata: self.connection.xenapi.VM.set_VCPUs_number_live(vm_ref, str(guestdata[\"vcpus_number_live\"])) if \"vcpu_settings\" in guestdata: parameters", "\"name\" in guestdata: self.connection.xenapi.VM.set_name_label(vm_ref, guestdata[\"name\"]) if \"memory\" in guestdata: memory = guestdata[\"memory\"] if", "= self._vm_ref(snapshot_id) return self._snapshot_info(snap) def snapshot_revert(self, 
guest_id, snapshot_id): self.connection.xenapi.VM.revert(self._vm_ref(snapshot_id)) def snapshot_delete(self, guest_id, snapshot_id):", "name, \"name_description\": description, \"other_config\": other_config}) def network_vlan_create(self, name, description, from_network, vlan, other_config={}): net_ref", "media_unmount(self, guest_id): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) null_ref = 'OpaqueRef:NULL' if self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"]", "= data[\"name\"] vdi_ref = self.connection.xenapi.VDI.create(vdi_rec) vbd_rec[\"VDI\"] = vdi_ref self.connection.xenapi.VBD.create(vbd_rec) disk_rec = self._disk_rec(vm_ref, next_device)", "and vm_size > 0 and vm_size > max_free_space: raise Exception(\"No storage space left", "if \"vcpu_settings\" in guestdata: parameters = self.connection.xenapi.VM.get_VCPUs_params(vm_ref) parameters.update(guestdata[\"vcpu_settings\"]) self.connection.xenapi.VM.set_VCPUs_params(vm_ref, parameters) if \"ha_enabled\" in", "if not isinstance(vcpus,dict): vcpus = { \"vcpus_at_startup\" : vcpus, \"vcpus_max\" : self.connection.xenapi.VM.get_VCPUs_max(vm_ref) }", "return {\"name_label\": self.connection.xenapi.network.get_name_label(net_ref), \"bridge\": self.connection.xenapi.network.get_bridge(net_ref), \"name_description\": self.connection.xenapi.network.get_name_description(net_ref), \"other_config\": self.connection.xenapi.network.get_other_config(net_ref)} def _network_ref(self, name): net_ref", "SimpleStack from simplestack.presenters.formatter import Formatter import re import errno import socket import httplib", "\"userdevice\": str(next_device), \"bootable\": False, \"mode\": \"RW\", \"type\": \"Disk\", \"unpluggable\": False, \"empty\": False, \"other_config\":", "tag_list(self, guest_id): return self.connection.xenapi.VM.get_tags(self._vm_ref(guest_id)) def tag_create(self, guest_id, tag_name): vm_ref = self._vm_ref(guest_id) self.connection.xenapi.VM.add_tags(vm_ref, tag_name)", 
"{'locking_mode': None, 'ipv4_allowed': None, 'ipv6_allowed': None} vif_rec.update(self.connection.xenapi.VIF.get_record(vif_ref)) network_rec = self.connection.xenapi.network.get_record( vif_rec[\"network\"] ) return(", "def guest_list(self): guests = [] for vm in self.connection.xenapi.VM.get_all_records().values(): if (not vm.get('is_a_snapshot')) and", "network_interface_id) return self._network_interface_info(vif_ref) def network_interface_update(self, guest_id, network_interface_id, data): vm_ref = self._vm_ref(guest_id) vif_ref =", "self.connection.xenapi.VM.resume( self._vm_ref(guest_id), False, False ) def guest_clone(self, guest_id, data): vm = self.connection.xenapi.VM.clone( self._vm_ref(guest_id),", "def _vm_info(self, vm_ref): vm = self.connection.xenapi.VM.get_record(vm_ref) tools_up_to_date = None ip = None if", "if \"vcpus_number_live\" in guestdata: self.connection.xenapi.VM.set_VCPUs_number_live(vm_ref, str(guestdata[\"vcpus_number_live\"])) if \"vcpu_settings\" in guestdata: parameters = self.connection.xenapi.VM.get_VCPUs_params(vm_ref)", "= self._vm_ref(guest_id) self.connection.xenapi.VM.add_tags(vm_ref, tag_name) return self.tag_list(guest_id) def tag_delete(self, guest_id, tag_name): vm_ref = self._vm_ref(guest_id)", "error def logout(self): self.connection.xenapi.session.logout() def pool_info(self): used_memory = 0 for vm_rec in self.connection.xenapi.VM.get_all_records().values():", "\"name_description\": \"Simplestack generated disk\", \"virtual_size\": str(data[\"size\"] * 1024 * 1024 * 1024), \"type\":", "not isinstance(memory,dict): memory = { \"memory_target\" : memory , \"memory_static_min\" : memory, \"memory_static_max\"", "(1024 * 1024), pool_rec[\"uuid\"], master_rec[\"address\"], { 'version': master_rec.get('software_version', {}).get('product_version') } ) ) def", "in data: if data[\"ratelimit\"]: # kbps in xen is actually kBps rate =", "sr['uuid'], sr['name_label'], sr['type'], int(sr['physical_utilisation']) / (1024 * 1024 
* 1024), int(sr['virtual_allocation']) / (1024", "host = None if vm[\"resident_on\"] != \"OpaqueRef:NULL\": host = self.connection.xenapi.host.get_name_label( vm[\"resident_on\"] ) return(", "vbd_rec = { \"VM\": vm_ref, \"userdevice\": str(next_device), \"bootable\": False, \"mode\": \"RW\", \"type\": \"Disk\",", "self.connection.xenapi.task.create( \"import vm\", \"import job\" ) path = \"/import?session_id=%s&task_id=%s&sr_id=%s\" % ( session_ref, task_ref,", "= guestdata[\"memory\"] if not isinstance(memory,dict): memory = { \"memory_target\" : memory , \"memory_static_min\"", "data[\"name\"] ) self.connection.xenapi.VDI.set_name_description( disk_rec[\"ref\"], data[\"name\"] ) if \"size\" in data: new_disk_size = int(data[\"size\"])", "vcpus_at_startup = str(vcpus[\"vcpus_at_startup\"]) vcpus_max = str(vcpus[\"vcpus_max\"]) if int(vcpus_at_startup) > int(vcpus_max): self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_at_startup) else:", "str(error).split(\"'\")[3] self.connect() else: raise error def logout(self): self.connection.xenapi.session.logout() def pool_info(self): used_memory = 0", "1 for device in range(next_device): if device not in devices: next_device = device", "int(record[\"virtual_allocation\"]) ) if free_space > max_free_space: max_free_space = free_space storage_ref = sr_ref if", "guest_metrics[\"networks\"].keys(): ip = guest_metrics[\"networks\"][\"0/ip\"] host = None if vm[\"resident_on\"] != \"OpaqueRef:NULL\": host =", "vm[\"PV_args\"], tools_up_to_date, ip, self.state_translation[vm.get('power_state')], host ) ) def _disk_info(self, disk_rec): return( self.format_for.disk( disk_rec.get('userdevice'),", "vif_record[\"ipv6_allowed\"] != data[\"ipv6_allowed\"]: new_attributes[\"ipv6_allowed\"] = data[\"ipv6_allowed\"] if len(new_attributes) != 0: vif_record.update(new_attributes) try: self.connection.xenapi.VIF.unplug(vif_ref)", "self.format_for.pool( used_memory / (1024 * 1024), total_memory / (1024 * 1024), pool_rec[\"uuid\"], 
master_rec[\"address\"],", "in range(next_device): if device not in devices: next_device = device break vif_record =", "vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) vif_record = self.connection.xenapi.VIF.get_record(vif_ref) new_attributes = {}", "in xen is actually kBps rate = data[\"ratelimit\"] / (8 * 1024) self.connection.xenapi.VIF.set_qos_algorithm_type(", "self.connection.xenapi.pool.create_VLAN_from_PIF(pif_ref[0], net_ref, str(vlan)) return net_ref def network_interface_list(self, guest_id): vm_ref = self._vm_ref(guest_id) vif_refs =", "free_space storage_ref = sr_ref if vm_size and vm_size > 0 and vm_size >", "not vm_rec['is_a_template'] and not vm_rec['is_a_snapshot']: used_memory += int(vm_rec['memory_dynamic_max']) total_memory = 0 for host_ref", "xen is actually kBps rate = data[\"ratelimit\"] / (8 * 1024) self.connection.xenapi.VIF.set_qos_algorithm_type( vif_ref,", "self.connection.xenapi.VIF.plug(vif_ref) except: pass else: try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass if \"ratelimit\" in data: if", "snapshot_name = str(datetime.datetime.now()) snap = self.connection.xenapi.VM.snapshot( self._vm_ref(guest_id), snapshot_name ) return self._snapshot_info(snap) def snapshot_info(self,", "0 for host_ref in self.connection.xenapi.host.get_all(): met_ref = self.connection.xenapi.host.get_metrics(host_ref) m_rec = self.connection.xenapi.host_metrics.get_record(met_ref) total_memory +=", "for vif_ref in vif_refs: vif_rec = self.connection.xenapi.VIF.get_record(vif_ref) if vif_rec[\"MAC\"] == network_interface_id: return vif_ref", "tag_name): vm_ref = self._vm_ref(guest_id) self.connection.xenapi.VM.add_tags(vm_ref, tag_name) return self.tag_list(guest_id) def tag_delete(self, guest_id, tag_name): vm_ref", "def _network_interface_info(self, vif_ref): vif_rec = {'locking_mode': None, 'ipv4_allowed': None, 'ipv6_allowed': None} vif_rec.update(self.connection.xenapi.VIF.get_record(vif_ref)) 
network_rec", "len(sr[\"PBDs\"]) > 0: storages.append({'id': sr[\"uuid\"]}) return storages def storage_info(self, storage_id): sr_ref = self.connection.xenapi.SR.get_by_uuid(storage_id)", "def _delete_vm(self, vm_id): vm_ref = self._vm_ref(vm_id) if not vm_ref: return for snap_ref in", "= 0 for host_ref in self.connection.xenapi.host.get_all(): met_ref = self.connection.xenapi.host.get_metrics(host_ref) m_rec = self.connection.xenapi.host_metrics.get_record(met_ref) total_memory", "guest_id): return self.connection.xenapi.VM.suspend(self._vm_ref(guest_id)) def guest_resume(self, guest_id): return self.connection.xenapi.VM.resume( self._vm_ref(guest_id), False, False ) def", "\"ratelimit\" in data: if data[\"ratelimit\"]: # kbps in xen is actually kBps rate", "* 1024 * 1024 new_disk_size = hdd - disks_size + int(disk[\"virtual_size\"]) self.connection.xenapi.VDI.resize(disk[\"ref\"], str(new_disk_size))", "vif_rec[\"locking_mode\"], vif_rec[\"ipv4_allowed\"], vif_rec[\"ipv6_allowed\"], vif_rec[\"qos_algorithm_params\"] ) ) def _delete_vm(self, vm_id): vm_ref = self._vm_ref(vm_id) if", "is not None and len(sr[\"PBDs\"]) > 0: storages.append({'id': sr[\"uuid\"]}) return storages def storage_info(self,", "= re.sub(r'<.*?>', \"\", task_rec[\"result\"]) self.connection.xenapi.task.destroy(task_ref) return self._vm_info(vm_ref) def guest_export(self, guest_id): vm_ref = self._vm_ref(guest_id)", ")[0] self.connection.xenapi.VBD.insert(cd_ref, iso_ref) else: self.media_unmount(guest_id) def media_unmount(self, guest_id): vm_ref = self._vm_ref(guest_id) cd_ref =", "data[\"ratelimit\"] / (8 * 1024) self.connection.xenapi.VIF.set_qos_algorithm_type( vif_ref, \"ratelimit\" ) self.connection.xenapi.VIF.set_qos_algorithm_params( vif_ref, {\"kbps\": str(rate)}", "\"other_config\": other_config}) def network_vlan_create(self, name, description, from_network, vlan, other_config={}): net_ref = self._network_create(name, description,", "return for snap_ref in 
self.connection.xenapi.VM.get_snapshots(vm_ref): snap = self.connection.xenapi.VM.get_record(snap_ref) self._delete_vm(snap[\"uuid\"]) self._delete_disks(vm_ref) self.connection.xenapi.VM.destroy(vm_ref) def _cd_ref(self,", "if (not vm.get('is_a_snapshot')) and (not vm.get('is_a_template')): guests.append({'id': vm.get('uuid')}) return guests def guest_info(self, guest_id):", "except: pass self.connection.xenapi.VIF.destroy(vif_ref) vif_ref = self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref) except: pass if \"active\" in", "storage_list(self): storages = [] for sr in self.connection.xenapi.SR.get_all_records().values(): if sr[\"PBDs\"] is not None", "vbd[\"type\"] == \"CD\": return vbd_ref def _delete_disks(self, vm_ref): for vdi in self.get_disks(vm_ref): self.connection.xenapi.VDI.destroy(vdi['ref'])", ") def _delete_vm(self, vm_id): vm_ref = self._vm_ref(vm_id) if not vm_ref: return for snap_ref", "console=hvc0\" else: pv_args = guestdata[\"paravirtualized\"] self.connection.xenapi.VM.set_HVM_boot_policy(vm_ref, \"\") self.connection.xenapi.VM.set_PV_args(vm_ref, pv_args) else: self.connection.xenapi.VM.set_PV_args(vm_ref, \"\") self.connection.xenapi.VM.set_HVM_boot_params(", "if device not in devices: next_device = device break vif_record = { \"VM\":", ") def _storage_info(self, sr_ref): sr = self.connection.xenapi.SR.get_record(sr_ref) return( self.format_for.storage( sr['uuid'], sr['name_label'], sr['type'], int(sr['physical_utilisation'])", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "device in range(next_device): if device not in devices: next_device = device break vif_record", "\"unpluggable\": False, \"empty\": False, \"other_config\": {}, \"qos_algorithm_type\": \"\", \"qos_algorithm_params\": {} } vdi_rec =", "\"MAC\": \"\", \"MTU\": \"0\", \"other_config\": {}, \"qos_algorithm_type\": \"\", \"qos_algorithm_params\": {} } if \"network\"", "_disk_rec(self, 
vm_ref, disk_id): disk_id = str(disk_id) for disk in self.get_disks(vm_ref): if disk[\"userdevice\"] ==", "self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_max) self.connection.xenapi.VM.set_VCPUs_at_startup(vm_ref, vcpus_at_startup) if \"vcpus_number_live\" in guestdata: self.connection.xenapi.VM.set_VCPUs_number_live(vm_ref, str(guestdata[\"vcpus_number_live\"])) if \"vcpu_settings\" in", "self._network_interface_info(vif_ref) def network_interface_update(self, guest_id, network_interface_id, data): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id)", "vif_ref entity_info = \"%s - on Guest\" % (network_interface_id) raise EntityNotFound(\"NetworkInterface\", entity_info) def", "max_free_space = free_space storage_ref = sr_ref if vm_size and vm_size > 0 and", "guest_info(self, guest_id): vm = self._vm_ref(guest_id) return self._vm_info(vm) def guest_shutdown(self, guest_id, force=False): if force:", "\"memory_static_max\" : memory } memory_target = str(int(memory[\"memory_target\"])<<20) memory_static_min = str(int(memory[\"memory_static_min\"])<<20) memory_static_max = str(int(memory[\"memory_static_max\"])<<20)", "self.connection = False self.poolinfo = poolinfo self.format_for = Formatter() self.connect() def connect(self): self.connection", "Formatter import re import errno import socket import httplib import logging LOG =", "False, \"empty\": False, \"other_config\": {}, \"qos_algorithm_type\": \"\", \"qos_algorithm_params\": {} } vdi_rec = ({", "guest_id, snapshot_name=None): if not snapshot_name: snapshot_name = str(datetime.datetime.now()) snap = self.connection.xenapi.VM.snapshot( self._vm_ref(guest_id), snapshot_name", "new_attributes[\"network\"] = net_refs if \"locking_mode\" in data and vif_record[\"locking_mode\"] != data[\"locking_mode\"]: new_attributes[\"locking_mode\"] =", "vm_stream, {\"Content-Length\": vm_size} ) response = conn.getresponse() response.status response.read() except 
socket.error as err:", "required by applicable law or agreed to in writing, software # distributed under", "disk_id = str(disk_id) for disk in self.get_disks(vm_ref): if disk[\"userdevice\"] == disk_id: return disk", "raise Exception(\"No storage space left for importing\") task_ref = self.connection.xenapi.task.create( \"import vm\", \"import", "disk_id): disk_id = str(disk_id) for disk in self.get_disks(vm_ref): if disk[\"userdevice\"] == disk_id: return", "= [] for sr in self.connection.xenapi.SR.get_all_records().values(): if sr[\"PBDs\"] is not None and len(sr[\"PBDs\"])", "disk_rec): return( self.format_for.disk( disk_rec.get('userdevice'), disk_rec.get('name_label'), disk_rec.get('userdevice'), int(disk_rec.get('virtual_size')) / (1024 * 1024 * 1024),", "self.get_disks(vm_ref) vdi_rec[\"SR\"] = disks[0][\"SR\"] if \"name\" in data: vdi_rec[\"name_label\"] = data[\"name\"] vdi_rec[\"name_description\"] =", "= data[\"ratelimit\"] / (8 * 1024) self.connection.xenapi.VIF.set_qos_algorithm_type( vif_ref, \"ratelimit\" ) self.connection.xenapi.VIF.set_qos_algorithm_params( vif_ref, {\"kbps\":", "name): net_ref = self.connection.xenapi.network.get_by_name_label(name) if len(net_ref) == 0: raise EntityNotFound(\"NetworkInterface\", \"Unknown network: %s\"", "name) return net_ref[0] def _network_get_pifs(self, name): ref = self._network_ref(name) return self.connection.xenapi.network.get_PIFs(ref) def _network_create(self,", "tag_create(self, guest_id, tag_name): vm_ref = self._vm_ref(guest_id) self.connection.xenapi.VM.add_tags(vm_ref, tag_name) return self.tag_list(guest_id) def tag_delete(self, guest_id,", ") ) def _disk_info(self, disk_rec): return( self.format_for.disk( disk_rec.get('userdevice'), disk_rec.get('name_label'), disk_rec.get('userdevice'), int(disk_rec.get('virtual_size')) / (1024", "self._vm_ref(guest_id) self.connection.xenapi.VM.remove_tags(vm_ref, tag_name) def get_disks(self, vm_ref): disks = [] vm = self.connection.xenapi.VM.get_record(vm_ref) 
for", "snapshot_delete(self, guest_id, snapshot_id): self._delete_vm(snapshot_id) def tag_list(self, guest_id): return self.connection.xenapi.VM.get_tags(self._vm_ref(guest_id)) def tag_create(self, guest_id, tag_name):", "ret.append({\"id\": net}) return ret def network_info(self, net_ref): return {\"name_label\": self.connection.xenapi.network.get_name_label(net_ref), \"bridge\": self.connection.xenapi.network.get_bridge(net_ref), \"name_description\":", "limitations under the License. # # @author: <NAME>, Locaweb. # @author: <NAME> (morellon),", "self.connection.xenapi.VBD.create(vbd_rec) disk_rec = self._disk_rec(vm_ref, next_device) return self._disk_info(disk_rec) def disk_info(self, guest_id, disk_id): vm_ref =", "snapshot_id): self._delete_vm(snapshot_id) def tag_list(self, guest_id): return self.connection.xenapi.VM.get_tags(self._vm_ref(guest_id)) def tag_create(self, guest_id, tag_name): vm_ref =", "def _host_info(self, host_ref): host = self.connection.xenapi.host.get_record(host_ref) return( self.format_for.host( host['uuid'], host['name_label'], host['address'] ) )", "memory , \"memory_static_min\" : memory, \"memory_static_max\" : memory } memory_target = str(int(memory[\"memory_target\"])<<20) memory_static_min", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "'ipv6_allowed': None} vif_rec.update(self.connection.xenapi.VIF.get_record(vif_ref)) network_rec = self.connection.xenapi.network.get_record( vif_rec[\"network\"] ) return( self.format_for.network_interface( vif_rec[\"MAC\"], vif_rec[\"device\"], vif_rec[\"MAC\"],", "# FIXME: get real master master = self.poolinfo.get(\"api_server\") task_ref = self.connection.xenapi.task.create( \"export vm", "media_info(self, guest_id): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) iso_ref = self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] if iso_ref", "self.connection.xenapi.VM.get_is_a_template(vm_ref) if guestdata[\"template\"] ^ 
is_template: self.connection.xenapi.VM.set_is_a_template( vm_ref, guestdata[\"template\"] ) if \"paravirtualized\" in guestdata:", "self.connection.xenapi.network.get_record( vif_rec[\"network\"] ) return( self.format_for.network_interface( vif_rec[\"MAC\"], vif_rec[\"device\"], vif_rec[\"MAC\"], network_rec[\"name_label\"], vif_rec[\"locking_mode\"], vif_rec[\"ipv4_allowed\"], vif_rec[\"ipv6_allowed\"], vif_rec[\"qos_algorithm_params\"]", "self._vm_ref(guest_id) devices = [] for vif in self.connection.xenapi.VM.get_VIFs(vm_ref): devices.append(int(self.connection.xenapi.VIF.get_device(vif))) next_device = max(devices) +", "rate = data[\"ratelimit\"] / (8 * 1024) self.connection.xenapi.VIF.set_qos_algorithm_type( vif_ref, \"ratelimit\" ) self.connection.xenapi.VIF.set_qos_algorithm_params( vif_ref,", "self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) if media_data.get(\"name\") and media_data[\"name\"] != \"\": self.media_unmount(guest_id) iso_ref =", "XenAPI.Session( \"https://%s/\" % self.poolinfo.get(\"api_server\") ) try: self.connection.xenapi.login_with_password( self.poolinfo.get(\"username\"), self.poolinfo.get(\"password\") ) except Exception, error:", "\"\", \"MTU\": \"0\", \"other_config\": {}, \"qos_algorithm_type\": \"\", \"qos_algorithm_params\": {} } if \"network\" in", "self.connection.xenapi.VM.get_VCPUs_params(vm_ref) parameters.update(guestdata[\"vcpu_settings\"]) self.connection.xenapi.VM.set_VCPUs_params(vm_ref, parameters) if \"ha_enabled\" in guestdata: if guestdata[\"ha_enabled\"]: self.connection.xenapi.VM.set_ha_restart_priority( vm_ref, \"best-effort\"", "if data.get(\"storage_id\"): raise FeatureNotImplemented() else: disks = self.get_disks(vm_ref) vdi_rec[\"SR\"] = disks[0][\"SR\"] if \"name\"", "response = conn.getresponse() response_size = response.getheader(\"Content-Length\") return (response, response_size) def disk_list(self, guest_id): vm_ref", "Guest\" % (network_interface_id) raise EntityNotFound(\"NetworkInterface\", 
entity_info) def _vm_ref(self, uuid): try: return self.connection.xenapi.VM.get_by_uuid(uuid) except:", "self.connection.xenapi.SR.get_by_uuid(storage_id) else: storages = self.connection.xenapi.SR.get_all_records() max_free_space = 0 for sr_ref, record in storages.iteritems():", "if force: return self.connection.xenapi.VM.hard_shutdown( self._vm_ref(guest_id) ) else: return self.connection.xenapi.VM.clean_shutdown( self._vm_ref(guest_id) ) def guest_start(self,", "0: storages.append({'id': sr[\"uuid\"]}) return storages def storage_info(self, storage_id): sr_ref = self.connection.xenapi.SR.get_by_uuid(storage_id) return self._storage_info(sr_ref)", "except: pass if \"active\" in data: if data[\"active\"]: try: self.connection.xenapi.VIF.plug(vif_ref) except: pass else:", "network_vlan_create(self, name, description, from_network, vlan, other_config={}): net_ref = self._network_create(name, description, other_config) pif_ref =", "in self.get_disks(vm_ref): size += int(vdi[\"virtual_size\"]) return size def _disk_rec(self, vm_ref, disk_id): disk_id =", "return None def _host_info(self, host_ref): host = self.connection.xenapi.host.get_record(host_ref) return( self.format_for.host( host['uuid'], host['name_label'], host['address']", "storages.iteritems(): free_space = ( int(record[\"physical_size\"]) - int(record[\"virtual_allocation\"]) ) if free_space > max_free_space: max_free_space", "self._vm_ref(guest_id) ) ] return snaps def snapshot_create(self, guest_id, snapshot_name=None): if not snapshot_name: snapshot_name", "is actually kBps rate = data[\"ratelimit\"] / (8 * 1024) self.connection.xenapi.VIF.set_qos_algorithm_type( vif_ref, \"ratelimit\"", "guests.append({'id': vm.get('uuid')}) return guests def guest_info(self, guest_id): vm = self._vm_ref(guest_id) return self._vm_info(vm) def", "if iso_ref == 'OpaqueRef:NULL': return {\"name\": None} else: name = self.connection.xenapi.VDI.get_record(iso_ref)[\"name_label\"] return {\"name\":", 
"self.connection.xenapi.VM.destroy(vm_ref) def _cd_ref(self, vm_ref): vm = self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref in vm['VBDs']: vbd =", "self.connection.xenapi.VIF.get_record(vif_ref) if vif_rec[\"MAC\"] == network_interface_id: return vif_ref entity_info = \"%s - on Guest\"", "if \"hdd\" in guestdata: disk = self.get_disks(vm_ref)[-1] disks_size = self.get_disks_size(vm_ref) hdd = guestdata.get(\"hdd\")", "if data[\"ratelimit\"]: # kbps in xen is actually kBps rate = data[\"ratelimit\"] /", "in data: if data[\"active\"]: try: self.connection.xenapi.VIF.plug(vif_ref) except: pass else: try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass", "guest_id, disk_id): vm_ref = self._vm_ref(guest_id) disk_rec = self._disk_rec(vm_ref, disk_id) return self._disk_info(disk_rec) def disk_update(self,", "self.connection.xenapi.host_metrics.get_record(met_ref) total_memory += int(m_rec['memory_total']) pool_rec = self.connection.xenapi.pool.get_all_records().values()[0] master_rec = self.connection.xenapi.host.get_record(pool_rec[\"master\"]) return ( self.format_for.pool(", "pv_args = guestdata[\"paravirtualized\"] self.connection.xenapi.VM.set_HVM_boot_policy(vm_ref, \"\") self.connection.xenapi.VM.set_PV_args(vm_ref, pv_args) else: self.connection.xenapi.VM.set_PV_args(vm_ref, \"\") self.connection.xenapi.VM.set_HVM_boot_params( vm_ref, {\"order\":", "(morellon), Locaweb. # @author: <NAME> (PotHix), Locaweb. 
from simplestack.utils import XenAPI from simplestack.exceptions", "% self.poolinfo.get(\"api_server\") ) try: self.connection.xenapi.login_with_password( self.poolinfo.get(\"username\"), self.poolinfo.get(\"password\") ) except Exception, error: # If", "def disk_list(self, guest_id): vm_ref = self._vm_ref(guest_id) disks = self.get_disks(vm_ref) return [self._disk_info(d) for d", "False self.poolinfo = poolinfo self.format_for = Formatter() self.connect() def connect(self): self.connection = XenAPI.Session(", "= self.connection.xenapi.VM.get_record(vm_ref) tools_up_to_date = None ip = None if vm[\"guest_metrics\"] != \"OpaqueRef:NULL\": guest_metrics", "self.connection.xenapi.task.destroy(task_ref) return self._vm_info(vm_ref) def guest_export(self, guest_id): vm_ref = self._vm_ref(guest_id) session_ref = self.connection._session #", "data[\"name\"] ) return self._vm_info(vm) def guest_update(self, guest_id, guestdata): vm_ref = self._vm_ref(guest_id) if \"name\"", "!= data[\"locking_mode\"]: new_attributes[\"locking_mode\"] = data[\"locking_mode\"] if \"ipv4_allowed\" in data and vif_record[\"ipv4_allowed\"] != data[\"ipv4_allowed\"]:", "self._disk_rec(vm_ref, disk_id) return self._disk_info(disk_rec) def disk_update(self, guest_id, disk_id, data): vm_ref = self._vm_ref(guest_id) disk_rec", "import logging LOG = logging.getLogger('simplestack.hypervisors.xen') class Stack(SimpleStack): state_translation = { \"Running\": \"STARTED\", \"Halted\":", "errno.ECONNRESET: LOG.warning(\"error=CONNRESET action=import message='BUG?'\") else: raise task_rec = self.connection.xenapi.task.get_record(task_ref) vm_ref = re.sub(r'<.*?>', \"\",", "except socket.error as err: if err.errno == errno.ECONNRESET: LOG.warning(\"error=CONNRESET action=import message='BUG?'\") else: raise", "# you may not use this file except in compliance with the License.", "path) response = conn.getresponse() response_size = response.getheader(\"Content-Length\") return (response, response_size) def 
disk_list(self, guest_id):", "def snapshot_list(self, guest_id): snaps = [ self._snapshot_info(s) for s in self.connection.xenapi.VM.get_snapshots( self._vm_ref(guest_id) )", "if guestdata[\"paravirtualized\"] is True: pv_args = \"-- quiet console=hvc0\" else: pv_args = guestdata[\"paravirtualized\"]", "if storage_id: storage_ref = self.connection.xenapi.SR.get_by_uuid(storage_id) else: storages = self.connection.xenapi.SR.get_all_records() max_free_space = 0 for", "return( self.format_for.guest( vm.get('uuid'), vm.get('name_label'), int(vm.get('VCPUs_at_startup')), int(vm.get('memory_static_max')) / (1024 * 1024), self.get_disks_size(vm_ref) / (1024", "def media_unmount(self, guest_id): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) null_ref = 'OpaqueRef:NULL' if", "vm_ref = self._vm_ref(guest_id) disk_rec = self._disk_rec(vm_ref, disk_id) return self._disk_info(disk_rec) def disk_update(self, guest_id, disk_id,", "pif_ref = self._network_get_pifs(from_network) ref = self.connection.xenapi.pool.create_VLAN_from_PIF(pif_ref[0], net_ref, str(vlan)) return net_ref def network_interface_list(self, guest_id):", "== errno.ECONNRESET: LOG.warning(\"error=CONNRESET action=import message='BUG?'\") else: raise task_rec = self.connection.xenapi.task.get_record(task_ref) vm_ref = re.sub(r'<.*?>',", "n in vif_refs] def network_interface_create(self, guest_id, data): \"\"\" Data should contain at least", "import httplib import logging LOG = logging.getLogger('simplestack.hypervisors.xen') class Stack(SimpleStack): state_translation = { \"Running\":", "if \"name\" in data: vdi_rec[\"name_label\"] = data[\"name\"] vdi_rec[\"name_description\"] = data[\"name\"] vdi_ref = self.connection.xenapi.VDI.create(vdi_rec)", "\"memory\" in guestdata: memory = guestdata[\"memory\"] if not isinstance(memory,dict): memory = { \"memory_target\"", "self.connection.xenapi.VM.get_VIFs(vm_ref) return [self._network_interface_info(n) for n in vif_refs] def 
network_interface_create(self, guest_id, data): \"\"\" Data", "vm_ref, network_interface_id): vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref) for vif_ref in vif_refs: vif_rec = self.connection.xenapi.VIF.get_record(vif_ref) if", "isinstance(memory,dict): memory = { \"memory_target\" : memory , \"memory_static_min\" : memory, \"memory_static_max\" :", "force: return self.connection.xenapi.VM.hard_reboot(vm_ref) else: return self.connection.xenapi.VM.clean_reboot(vm_ref) def guest_suspend(self, guest_id): return self.connection.xenapi.VM.suspend(self._vm_ref(guest_id)) def guest_resume(self,", "on Guest\" % (disk_id) raise EntityNotFound(\"Disk\", entity_info) def _network_interface_ref(self, vm_ref, network_interface_id): vif_refs =", "memory_static_min, memory_static_max, memory_target, memory_target ) if \"memory_target_live\" in guestdata: memory_target = str(int(guestdata[\"memory_target_live\"])<<20) self.connection.xenapi.VM.set_memory_dynamic_range(", "guest_id): vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) null_ref = 'OpaqueRef:NULL' if self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] !=", "is_template: self.connection.xenapi.VM.set_is_a_template( vm_ref, guestdata[\"template\"] ) if \"paravirtualized\" in guestdata: if guestdata[\"paravirtualized\"]: if guestdata[\"paravirtualized\"]", "for vm_rec in self.connection.xenapi.VM.get_all_records().values(): if not vm_rec['is_a_template'] and not vm_rec['is_a_snapshot']: used_memory += int(vm_rec['memory_dynamic_max'])", "def get_disks_size(self, vm_ref): size = 0 for vdi in self.get_disks(vm_ref): size += int(vdi[\"virtual_size\"])", "vm_size and vm_size > 0 and vm_size > max_free_space: raise Exception(\"No storage space", "License for the specific language governing permissions and # limitations under the License.", "devices.append(int(self.connection.xenapi.VIF.get_device(vif))) next_device = max(devices) + 1 for device in range(next_device): if device not", "disks.append(vdi) 
return sorted(disks, key=lambda vdi: int(vdi['userdevice'])) def get_disks_size(self, vm_ref): size = 0 for", "least a network key: {\"network\": \"THE NETWORK NAME\"} \"\"\" vm_ref = self._vm_ref(guest_id) devices", "vbd = self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"] == \"CD\": return vbd_ref def _delete_disks(self, vm_ref): for", "\"License\"); # you may not use this file except in compliance with the", "def guest_suspend(self, guest_id): return self.connection.xenapi.VM.suspend(self._vm_ref(guest_id)) def guest_resume(self, guest_id): return self.connection.xenapi.VM.resume( self._vm_ref(guest_id), False, False", "def _disk_info(self, disk_rec): return( self.format_for.disk( disk_rec.get('userdevice'), disk_rec.get('name_label'), disk_rec.get('userdevice'), int(disk_rec.get('virtual_size')) / (1024 * 1024", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "int(disk[\"virtual_size\"]) self.connection.xenapi.VDI.resize(disk[\"ref\"], str(new_disk_size)) return self._vm_info(self._vm_ref(guest_id)) def guest_delete(self, guest_id): self._delete_vm(guest_id) def guest_import(self, vm_stream, vm_size,", "\"0/ip\" in guest_metrics[\"networks\"].keys(): ip = guest_metrics[\"networks\"][\"0/ip\"] host = None if vm[\"resident_on\"] != \"OpaqueRef:NULL\":", ") else: self.connection.xenapi.VIF.set_qos_algorithm_type(vif_ref, \"\") return self._network_interface_info(vif_ref) def network_interface_delete(self, guest_id, network_interface_id): vm_ref = self._vm_ref(guest_id)", "def network_interface_create(self, guest_id, data): \"\"\" Data should contain at least a network key:", "!= 0: vif_record.update(new_attributes) try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass self.connection.xenapi.VIF.destroy(vif_ref) vif_ref = self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref)", "return snaps def snapshot_create(self, guest_id, snapshot_name=None): if not 
snapshot_name: snapshot_name = str(datetime.datetime.now()) snap", "self.connection.xenapi.host.get_record(host_ref) return( self.format_for.host( host['uuid'], host['name_label'], host['address'] ) ) def _storage_info(self, sr_ref): sr =", "self.state_translation[vm.get('power_state')], host ) ) def _disk_info(self, disk_rec): return( self.format_for.disk( disk_rec.get('userdevice'), disk_rec.get('name_label'), disk_rec.get('userdevice'), int(disk_rec.get('virtual_size'))", "network_interface_id, data): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) vif_record = self.connection.xenapi.VIF.get_record(vif_ref) new_attributes", "= XenAPI.Session( \"https://%s/\" % self.poolinfo.get(\"api_server\") ) try: self.connection.xenapi.login_with_password( self.poolinfo.get(\"username\"), self.poolinfo.get(\"password\") ) except Exception,", ") ] return snaps def snapshot_create(self, guest_id, snapshot_name=None): if not snapshot_name: snapshot_name =", "net_ref): return {\"name_label\": self.connection.xenapi.network.get_name_label(net_ref), \"bridge\": self.connection.xenapi.network.get_bridge(net_ref), \"name_description\": self.connection.xenapi.network.get_name_description(net_ref), \"other_config\": self.connection.xenapi.network.get_other_config(net_ref)} def _network_ref(self, name):", "self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"] == \"CD\": return vbd_ref def _delete_disks(self, vm_ref): for vdi in", "governing permissions and # limitations under the License. 
# # @author: <NAME>, Locaweb.", "= self.connection.xenapi.VDI.create(vdi_rec) vbd_rec[\"VDI\"] = vdi_ref self.connection.xenapi.VBD.create(vbd_rec) disk_rec = self._disk_rec(vm_ref, next_device) return self._disk_info(disk_rec) def", "self._network_get_pifs(from_network) ref = self.connection.xenapi.pool.create_VLAN_from_PIF(pif_ref[0], net_ref, str(vlan)) return net_ref def network_interface_list(self, guest_id): vm_ref =", "get_disks_size(self, vm_ref): size = 0 for vdi in self.get_disks(vm_ref): size += int(vdi[\"virtual_size\"]) return", "contain at least a network key: {\"network\": \"THE NETWORK NAME\"} \"\"\" vm_ref =", "self.connection.xenapi.VM.get_VCPUs_max(vm_ref) } vcpus_at_startup = str(vcpus[\"vcpus_at_startup\"]) vcpus_max = str(vcpus[\"vcpus_max\"]) if int(vcpus_at_startup) > int(vcpus_max): self.connection.xenapi.VM.set_VCPUs_max(vm_ref,", "/ (1024 * 1024 * 1024), int(sr['virtual_allocation']) / (1024 * 1024 * 1024),", "new_attributes[\"locking_mode\"] = data[\"locking_mode\"] if \"ipv4_allowed\" in data and vif_record[\"ipv4_allowed\"] != data[\"ipv4_allowed\"]: new_attributes[\"ipv4_allowed\"] =", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "snap = self.connection.xenapi.VM.snapshot( self._vm_ref(guest_id), snapshot_name ) return self._snapshot_info(snap) def snapshot_info(self, guest_id, snapshot_id): snap", "data[\"name\"] vdi_rec[\"name_description\"] = data[\"name\"] vdi_ref = self.connection.xenapi.VDI.create(vdi_rec) vbd_rec[\"VDI\"] = vdi_ref self.connection.xenapi.VBD.create(vbd_rec) disk_rec =", "= vbd['VDI'] disks.append(vdi) return sorted(disks, key=lambda vdi: int(vdi['userdevice'])) def get_disks_size(self, vm_ref): size =", "in writing, software # distributed under the License is distributed on an \"AS", "self._network_ref(data[\"network\"]) if vif_record[\"network\"] != net_refs: new_attributes[\"network\"] = net_refs if \"locking_mode\" in data and", "\"export job\" 
) path = \"/export?session_id=%s&task_id=%s&ref=%s\" % ( session_ref, task_ref, vm_ref ) conn", "range(next_device): if device not in devices: next_device = device break vbd_rec = {", "data): vm_ref = self._vm_ref(guest_id) disk_rec = self._disk_rec(vm_ref, disk_id) if \"name\" in data: self.connection.xenapi.VDI.set_name_label(", "None and len(sr[\"PBDs\"]) > 0: storages.append({'id': sr[\"uuid\"]}) return storages def storage_info(self, storage_id): sr_ref", "entity_info) def _network_interface_ref(self, vm_ref, network_interface_id): vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref) for vif_ref in vif_refs: vif_rec", "{ \"VM\": vm_ref, \"device\": str(next_device), \"MAC_autogenerated\": True, \"MAC\": \"\", \"MTU\": \"0\", \"other_config\": {},", "{ \"vcpus_at_startup\" : vcpus, \"vcpus_max\" : self.connection.xenapi.VM.get_VCPUs_max(vm_ref) } vcpus_at_startup = str(vcpus[\"vcpus_at_startup\"]) vcpus_max =", "EntityNotFound(\"NetworkInterface\", entity_info) def _vm_ref(self, uuid): try: return self.connection.xenapi.VM.get_by_uuid(uuid) except: LOG.warning(\"uuid=%s action=not_found\" % uuid)", "self.get_disks(vm_ref): if disk[\"userdevice\"] == disk_id: return disk entity_info = \"%s - on Guest\"", ") ) def _snapshot_info(self, snapshot_ref): snapshot = self.connection.xenapi.VM.get_record(snapshot_ref) return( self.format_for.snapshot( snapshot.get('uuid'), snapshot.get('name_label') )", "self._vm_ref(guest_id) ) else: return self.connection.xenapi.VM.clean_shutdown( self._vm_ref(guest_id) ) def guest_start(self, guest_id): return self.connection.xenapi.VM.start( self._vm_ref(guest_id),", "vm = self._vm_ref(guest_id) return self._vm_info(vm) def guest_shutdown(self, guest_id, force=False): if force: return self.connection.xenapi.VM.hard_shutdown(", "(1024 * 1024), self.get_disks_size(vm_ref) / (1024 * 1024 * 1024), vm[\"PV_args\"], tools_up_to_date, ip,", "def disk_update(self, guest_id, disk_id, data): vm_ref = self._vm_ref(guest_id) disk_rec = 
self._disk_rec(vm_ref, disk_id) if", "disk_create(self, guest_id, data): vm_ref = self._vm_ref(guest_id) devices = [] for vbd in self.connection.xenapi.VM.get_VBDs(vm_ref):", "{ \"VM\": vm_ref, \"userdevice\": str(next_device), \"bootable\": False, \"mode\": \"RW\", \"type\": \"Disk\", \"unpluggable\": False,", "False, \"mode\": \"RW\", \"type\": \"Disk\", \"unpluggable\": False, \"empty\": False, \"other_config\": {}, \"qos_algorithm_type\": \"\",", "disk_rec = self._disk_rec(vm_ref, disk_id) return self._disk_info(disk_rec) def media_mount(self, guest_id, media_data): vm_ref = self._vm_ref(guest_id)", "= self._network_interface_ref(vm_ref, network_interface_id) return self._network_interface_info(vif_ref) def network_interface_update(self, guest_id, network_interface_id, data): vm_ref = self._vm_ref(guest_id)", "uuid): try: return self.connection.xenapi.VM.get_by_uuid(uuid) except: LOG.warning(\"uuid=%s action=not_found\" % uuid) return None def _host_info(self,", "0 for vdi in self.get_disks(vm_ref): size += int(vdi[\"virtual_size\"]) return size def _disk_rec(self, vm_ref,", "in data: vif_record[\"network\"] = self._network_ref(data[\"network\"]) vif_ref = self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref) except: pass return", "if vbd[\"type\"] == \"Disk\": vdi = self.connection.xenapi.VDI.get_record(vbd['VDI']) vdi['userdevice'] = vbd['userdevice'] vdi['ref'] = vbd['VDI']", "return self.connection.xenapi.VM.clean_shutdown( self._vm_ref(guest_id) ) def guest_start(self, guest_id): return self.connection.xenapi.VM.start( self._vm_ref(guest_id), False, False )", "= self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref) except: pass if \"active\" in data: if data[\"active\"]: try:", "= data[\"ipv4_allowed\"] if \"ipv6_allowed\" in data and vif_record[\"ipv6_allowed\"] != data[\"ipv6_allowed\"]: new_attributes[\"ipv6_allowed\"] = data[\"ipv6_allowed\"]", "{\"kbps\": str(rate)} ) else: 
self.connection.xenapi.VIF.set_qos_algorithm_type(vif_ref, \"\") return self._network_interface_info(vif_ref) def network_interface_delete(self, guest_id, network_interface_id): vm_ref", "vm_ref, memory_static_min, memory_static_max, memory_target, memory_target ) if \"memory_target_live\" in guestdata: memory_target = str(int(guestdata[\"memory_target_live\"])<<20)", "language governing permissions and # limitations under the License. # # @author: <NAME>,", "net_ref[0] def _network_get_pifs(self, name): ref = self._network_ref(name) return self.connection.xenapi.network.get_PIFs(ref) def _network_create(self, name, description,", "self.connection.xenapi.network.create({\"name_label\": name, \"name_description\": description, \"other_config\": other_config}) def network_vlan_create(self, name, description, from_network, vlan, other_config={}):", "= self.connection.xenapi.SR.get_by_uuid(storage_id) else: storages = self.connection.xenapi.SR.get_all_records() max_free_space = 0 for sr_ref, record in", "guestdata: memory_target = str(int(guestdata[\"memory_target_live\"])<<20) self.connection.xenapi.VM.set_memory_dynamic_range( vm_ref, memory_target, memory_target ) if \"cpus\" in guestdata:", "self.connection.xenapi.VM.set_HVM_boot_policy(vm_ref, \"\") self.connection.xenapi.VM.set_PV_args(vm_ref, pv_args) else: self.connection.xenapi.VM.set_PV_args(vm_ref, \"\") self.connection.xenapi.VM.set_HVM_boot_params( vm_ref, {\"order\": \"dc\"} ) self.connection.xenapi.VM.set_HVM_boot_policy(", "storage_ref ) try: conn = httplib.HTTPConnection(master) conn.request( \"PUT\", path, vm_stream, {\"Content-Length\": vm_size} )", "\"sm_config\": {}, \"tags\": [] }) if data.get(\"storage_id\"): raise FeatureNotImplemented() else: disks = self.get_disks(vm_ref)", "str(datetime.datetime.now()) snap = self.connection.xenapi.VM.snapshot( self._vm_ref(guest_id), snapshot_name ) return self._snapshot_info(snap) def snapshot_info(self, guest_id, snapshot_id):", "in 
self.connection.xenapi.VM.get_snapshots(vm_ref): snap = self.connection.xenapi.VM.get_record(snap_ref) self._delete_vm(snap[\"uuid\"]) self._delete_disks(vm_ref) self.connection.xenapi.VM.destroy(vm_ref) def _cd_ref(self, vm_ref): vm =", "data): vm_ref = self._vm_ref(guest_id) devices = [] for vbd in self.connection.xenapi.VM.get_VBDs(vm_ref): devices.append(int(self.connection.xenapi.VBD.get_userdevice(vbd))) next_device", "httplib import logging LOG = logging.getLogger('simplestack.hypervisors.xen') class Stack(SimpleStack): state_translation = { \"Running\": \"STARTED\",", "path, vm_stream, {\"Content-Length\": vm_size} ) response = conn.getresponse() response.status response.read() except socket.error as", "2.0 (the \"License\"); # you may not use this file except in compliance", "else: return self.connection.xenapi.VM.clean_reboot(vm_ref) def guest_suspend(self, guest_id): return self.connection.xenapi.VM.suspend(self._vm_ref(guest_id)) def guest_resume(self, guest_id): return self.connection.xenapi.VM.resume(", "= self._cd_ref(vm_ref) if media_data.get(\"name\") and media_data[\"name\"] != \"\": self.media_unmount(guest_id) iso_ref = self.connection.xenapi.VDI.get_by_name_label( media_data[\"name\"]", "def guest_import(self, vm_stream, vm_size, storage_id=None): session_ref = self.connection._session master = self.poolinfo.get(\"api_server\") storage_ref =", "self.format_for.storage( sr['uuid'], sr['name_label'], sr['type'], int(sr['physical_utilisation']) / (1024 * 1024 * 1024), int(sr['virtual_allocation']) /", "guestdata: disk = self.get_disks(vm_ref)[-1] disks_size = self.get_disks_size(vm_ref) hdd = guestdata.get(\"hdd\") * 1024 *", "\"/export?session_id=%s&task_id=%s&ref=%s\" % ( session_ref, task_ref, vm_ref ) conn = httplib.HTTPConnection(master) conn.request(\"GET\", path) response", "= str(vcpus[\"vcpus_max\"]) if int(vcpus_at_startup) > int(vcpus_max): self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_at_startup) else: 
self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_max) self.connection.xenapi.VM.set_VCPUs_at_startup(vm_ref, vcpus_at_startup)", "vm_ref = self._vm_ref(guest_id) cd_ref = self._cd_ref(vm_ref) if media_data.get(\"name\") and media_data[\"name\"] != \"\": self.media_unmount(guest_id)", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "If host is slave, connect to master if 'HOST_IS_SLAVE' in str(error): self.poolinfo[\"api_server\"] =", "vdi_rec[\"name_description\"] = data[\"name\"] vdi_ref = self.connection.xenapi.VDI.create(vdi_rec) vbd_rec[\"VDI\"] = vdi_ref self.connection.xenapi.VBD.create(vbd_rec) disk_rec = self._disk_rec(vm_ref,", "# # Unless required by applicable law or agreed to in writing, software", "= self.connection.xenapi.pool.get_all_records().values()[0] master_rec = self.connection.xenapi.host.get_record(pool_rec[\"master\"]) return ( self.format_for.pool( used_memory / (1024 * 1024),", "\"OpaqueRef:NULL\": host = self.connection.xenapi.host.get_name_label( vm[\"resident_on\"] ) return( self.format_for.guest( vm.get('uuid'), vm.get('name_label'), int(vm.get('VCPUs_at_startup')), int(vm.get('memory_static_max')) /", "guest_suspend(self, guest_id): return self.connection.xenapi.VM.suspend(self._vm_ref(guest_id)) def guest_resume(self, guest_id): return self.connection.xenapi.VM.resume( self._vm_ref(guest_id), False, False )", "express or implied. # See the License for the specific language governing permissions", "disk[\"userdevice\"] == disk_id: return disk entity_info = \"%s - on Guest\" % (disk_id)", "(not vm.get('is_a_template')): guests.append({'id': vm.get('uuid')}) return guests def guest_info(self, guest_id): vm = self._vm_ref(guest_id) return", "{ \"memory_target\" : memory , \"memory_static_min\" : memory, \"memory_static_max\" : memory } memory_target", "max_free_space = 0 for sr_ref, record in storages.iteritems(): free_space = ( int(record[\"physical_size\"]) -", "either express or implied. 
# See the License for the specific language governing", "try: self.connection.xenapi.login_with_password( self.poolinfo.get(\"username\"), self.poolinfo.get(\"password\") ) except Exception, error: # If host is slave,", "0: vif_record.update(new_attributes) try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass self.connection.xenapi.VIF.destroy(vif_ref) vif_ref = self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref) except:", "memory_target = str(int(guestdata[\"memory_target_live\"])<<20) self.connection.xenapi.VM.set_memory_dynamic_range( vm_ref, memory_target, memory_target ) if \"cpus\" in guestdata: vcpus", "return [self._disk_info(d) for d in disks] def disk_create(self, guest_id, data): vm_ref = self._vm_ref(guest_id)", "try: return self.connection.xenapi.VM.get_by_uuid(uuid) except: LOG.warning(\"uuid=%s action=not_found\" % uuid) return None def _host_info(self, host_ref):", "data[\"ipv6_allowed\"]: new_attributes[\"ipv6_allowed\"] = data[\"ipv6_allowed\"] if len(new_attributes) != 0: vif_record.update(new_attributes) try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass", "self.connection.xenapi.VM.revert(self._vm_ref(snapshot_id)) def snapshot_delete(self, guest_id, snapshot_id): self._delete_vm(snapshot_id) def tag_list(self, guest_id): return self.connection.xenapi.VM.get_tags(self._vm_ref(guest_id)) def tag_create(self,", "device in range(next_device): if device not in devices: next_device = device break vbd_rec", "get_disks(self, vm_ref): disks = [] vm = self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref in vm['VBDs']: vbd", "other_config={}): return self.connection.xenapi.network.create({\"name_label\": name, \"name_description\": description, \"other_config\": other_config}) def network_vlan_create(self, name, description, from_network,", "guest_id, network_interface_id): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) try: 
self.connection.xenapi.VIF.unplug(vif_ref) except: pass", "return self.connection.xenapi.VM.start( self._vm_ref(guest_id), False, False ) def guest_reboot(self, guest_id, force=False): vm_ref = self._vm_ref(guest_id)", "str(int(memory[\"memory_static_min\"])<<20) memory_static_max = str(int(memory[\"memory_static_max\"])<<20) self.connection.xenapi.VM.set_memory_limits( vm_ref, memory_static_min, memory_static_max, memory_target, memory_target ) if \"memory_target_live\"", "def network_vlan_create(self, name, description, from_network, vlan, other_config={}): net_ref = self._network_create(name, description, other_config) pif_ref", "simplestack.hypervisors.base import SimpleStack from simplestack.presenters.formatter import Formatter import re import errno import socket", "vdi: int(vdi['userdevice'])) def get_disks_size(self, vm_ref): size = 0 for vdi in self.get_disks(vm_ref): size", "data and vif_record[\"ipv6_allowed\"] != data[\"ipv6_allowed\"]: new_attributes[\"ipv6_allowed\"] = data[\"ipv6_allowed\"] if len(new_attributes) != 0: vif_record.update(new_attributes)", "memory_target, memory_target ) if \"cpus\" in guestdata: vcpus = guestdata[\"cpus\"] if not isinstance(vcpus,dict):", "iso_ref = self.connection.xenapi.VBD.get_record(cd_ref)[\"VDI\"] if iso_ref == 'OpaqueRef:NULL': return {\"name\": None} else: name =", "the License. 
# You may obtain a copy of the License at #", ") if \"cpus\" in guestdata: vcpus = guestdata[\"cpus\"] if not isinstance(vcpus,dict): vcpus =", "{\"Content-Length\": vm_size} ) response = conn.getresponse() response.status response.read() except socket.error as err: if", "= [] for vbd in self.connection.xenapi.VM.get_VBDs(vm_ref): devices.append(int(self.connection.xenapi.VBD.get_userdevice(vbd))) next_device = max(devices) + 1 for", "!= \"OpaqueRef:NULL\": guest_metrics = self.connection.xenapi.VM_guest_metrics.\\ get_record(vm[\"guest_metrics\"]) tools_up_to_date = guest_metrics[\"PV_drivers_up_to_date\"] if \"0/ip\" in guest_metrics[\"networks\"].keys():", "data[\"ipv6_allowed\"] if len(new_attributes) != 0: vif_record.update(new_attributes) try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass self.connection.xenapi.VIF.destroy(vif_ref) vif_ref =", "guestdata: memory = guestdata[\"memory\"] if not isinstance(memory,dict): memory = { \"memory_target\" : memory", "self.connection = XenAPI.Session( \"https://%s/\" % self.poolinfo.get(\"api_server\") ) try: self.connection.xenapi.login_with_password( self.poolinfo.get(\"username\"), self.poolinfo.get(\"password\") ) except", "\"\") if \"template\" in guestdata: is_template = self.connection.xenapi.VM.get_is_a_template(vm_ref) if guestdata[\"template\"] ^ is_template: self.connection.xenapi.VM.set_is_a_template(", "def network_interface_list(self, guest_id): vm_ref = self._vm_ref(guest_id) vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref) return [self._network_interface_info(n) for n", "self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref) except: pass if \"active\" in data: if data[\"active\"]: try: self.connection.xenapi.VIF.plug(vif_ref)", "force: return self.connection.xenapi.VM.hard_shutdown( self._vm_ref(guest_id) ) else: return self.connection.xenapi.VM.clean_shutdown( self._vm_ref(guest_id) ) def guest_start(self, guest_id):", "1024 * 1024), 
int(sr['physical_size']) / (1024 * 1024 * 1024) ) ) def", "= self._vm_ref(guest_id) if \"name\" in guestdata: self.connection.xenapi.VM.set_name_label(vm_ref, guestdata[\"name\"]) if \"memory\" in guestdata: memory", "data[\"locking_mode\"] if \"ipv4_allowed\" in data and vif_record[\"ipv4_allowed\"] != data[\"ipv4_allowed\"]: new_attributes[\"ipv4_allowed\"] = data[\"ipv4_allowed\"] if", "self.connection.xenapi.VIF.destroy(vif_ref) vif_ref = self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref) except: pass if \"active\" in data: if", "vm_ref = self._vm_ref(guest_id) disk_rec = self._disk_rec(vm_ref, disk_id) if \"name\" in data: self.connection.xenapi.VDI.set_name_label( disk_rec[\"ref\"],", "if force: return self.connection.xenapi.VM.hard_reboot(vm_ref) else: return self.connection.xenapi.VM.clean_reboot(vm_ref) def guest_suspend(self, guest_id): return self.connection.xenapi.VM.suspend(self._vm_ref(guest_id)) def", "int(vcpus_at_startup) > int(vcpus_max): self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_at_startup) else: self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_max) self.connection.xenapi.VM.set_VCPUs_at_startup(vm_ref, vcpus_at_startup) if \"vcpus_number_live\" in", "disks_size = self.get_disks_size(vm_ref) hdd = guestdata.get(\"hdd\") * 1024 * 1024 * 1024 new_disk_size", "= self.connection.xenapi.VBD.get_record(vbd_ref) if vbd[\"type\"] == \"Disk\": vdi = self.connection.xenapi.VDI.get_record(vbd['VDI']) vdi['userdevice'] = vbd['userdevice'] vdi['ref']", "return self.connection.xenapi.VM.clean_reboot(vm_ref) def guest_suspend(self, guest_id): return self.connection.xenapi.VM.suspend(self._vm_ref(guest_id)) def guest_resume(self, guest_id): return self.connection.xenapi.VM.resume( self._vm_ref(guest_id),", "guest_id, data): vm_ref = self._vm_ref(guest_id) devices = [] for vbd in self.connection.xenapi.VM.get_VBDs(vm_ref): devices.append(int(self.connection.xenapi.VBD.get_userdevice(vbd)))", "in guestdata: 
is_template = self.connection.xenapi.VM.get_is_a_template(vm_ref) if guestdata[\"template\"] ^ is_template: self.connection.xenapi.VM.set_is_a_template( vm_ref, guestdata[\"template\"] )", "self.connection.xenapi.host.get_metrics(host_ref) m_rec = self.connection.xenapi.host_metrics.get_record(met_ref) total_memory += int(m_rec['memory_total']) pool_rec = self.connection.xenapi.pool.get_all_records().values()[0] master_rec = self.connection.xenapi.host.get_record(pool_rec[\"master\"])", "disk = self.get_disks(vm_ref)[-1] disks_size = self.get_disks_size(vm_ref) hdd = guestdata.get(\"hdd\") * 1024 * 1024", "int(vdi['userdevice'])) def get_disks_size(self, vm_ref): size = 0 for vdi in self.get_disks(vm_ref): size +=", "self._network_interface_info(vif_ref) def network_interface_delete(self, guest_id, network_interface_id): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) try:", "guestdata[\"name\"]) if \"memory\" in guestdata: memory = guestdata[\"memory\"] if not isinstance(memory,dict): memory =", "from simplestack.hypervisors.base import SimpleStack from simplestack.presenters.formatter import Formatter import re import errno import", "vcpus_at_startup) if \"vcpus_number_live\" in guestdata: self.connection.xenapi.VM.set_VCPUs_number_live(vm_ref, str(guestdata[\"vcpus_number_live\"])) if \"vcpu_settings\" in guestdata: parameters =", "self.get_disks_size(vm_ref) / (1024 * 1024 * 1024), vm[\"PV_args\"], tools_up_to_date, ip, self.state_translation[vm.get('power_state')], host )", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "guestdata: vcpus = guestdata[\"cpus\"] if not isinstance(vcpus,dict): vcpus = { \"vcpus_at_startup\" : vcpus,", "if \"paravirtualized\" in guestdata: if guestdata[\"paravirtualized\"]: if guestdata[\"paravirtualized\"] is True: pv_args = \"--", "{\"order\": \"dc\"} ) self.connection.xenapi.VM.set_HVM_boot_policy( vm_ref, \"BIOS order\" ) if \"hdd\" in 
guestdata: disk", "message='BUG?'\") else: raise task_rec = self.connection.xenapi.task.get_record(task_ref) vm_ref = re.sub(r'<.*?>', \"\", task_rec[\"result\"]) self.connection.xenapi.task.destroy(task_ref) return", "tag_name): vm_ref = self._vm_ref(guest_id) self.connection.xenapi.VM.remove_tags(vm_ref, tag_name) def get_disks(self, vm_ref): disks = [] vm", "def guest_export(self, guest_id): vm_ref = self._vm_ref(guest_id) session_ref = self.connection._session # FIXME: get real", "if vbd[\"type\"] == \"CD\": return vbd_ref def _delete_disks(self, vm_ref): for vdi in self.get_disks(vm_ref):", "def network_interface_delete(self, guest_id, network_interface_id): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) try: self.connection.xenapi.VIF.unplug(vif_ref)", "self.connection.xenapi.VIF.set_qos_algorithm_type(vif_ref, \"\") return self._network_interface_info(vif_ref) def network_interface_delete(self, guest_id, network_interface_id): vm_ref = self._vm_ref(guest_id) vif_ref =", "= str(int(memory[\"memory_static_min\"])<<20) memory_static_max = str(int(memory[\"memory_static_max\"])<<20) self.connection.xenapi.VM.set_memory_limits( vm_ref, memory_static_min, memory_static_max, memory_target, memory_target ) if", ") path = \"/export?session_id=%s&task_id=%s&ref=%s\" % ( session_ref, task_ref, vm_ref ) conn = httplib.HTTPConnection(master)", "for vdi in self.get_disks(vm_ref): size += int(vdi[\"virtual_size\"]) return size def _disk_rec(self, vm_ref, disk_id):", "= \"-- quiet console=hvc0\" else: pv_args = guestdata[\"paravirtualized\"] self.connection.xenapi.VM.set_HVM_boot_policy(vm_ref, \"\") self.connection.xenapi.VM.set_PV_args(vm_ref, pv_args) else:", "self._delete_vm(guest_id) def guest_import(self, vm_stream, vm_size, storage_id=None): session_ref = self.connection._session master = self.poolinfo.get(\"api_server\") storage_ref", "\"https://%s/\" % self.poolinfo.get(\"api_server\") ) try: 
self.connection.xenapi.login_with_password( self.poolinfo.get(\"username\"), self.poolinfo.get(\"password\") ) except Exception, error: #", "self.connection.xenapi.VM.get_all_records().values(): if not vm_rec['is_a_template'] and not vm_rec['is_a_snapshot']: used_memory += int(vm_rec['memory_dynamic_max']) total_memory = 0", "= httplib.HTTPConnection(master) conn.request( \"PUT\", path, vm_stream, {\"Content-Length\": vm_size} ) response = conn.getresponse() response.status", "disk_info(self, guest_id, disk_id): vm_ref = self._vm_ref(guest_id) disk_rec = self._disk_rec(vm_ref, disk_id) return self._disk_info(disk_rec) def", "return self._vm_info(vm) def guest_shutdown(self, guest_id, force=False): if force: return self.connection.xenapi.VM.hard_shutdown( self._vm_ref(guest_id) ) else:", "err: if err.errno == errno.ECONNRESET: LOG.warning(\"error=CONNRESET action=import message='BUG?'\") else: raise task_rec = self.connection.xenapi.task.get_record(task_ref)", "vm_ref = self._vm_ref(guest_id) devices = [] for vif in self.connection.xenapi.VM.get_VIFs(vm_ref): devices.append(int(self.connection.xenapi.VIF.get_device(vif))) next_device =", "iso_ref == 'OpaqueRef:NULL': return {\"name\": None} else: name = self.connection.xenapi.VDI.get_record(iso_ref)[\"name_label\"] return {\"name\": name}", "* 1024 * 1024), \"type\": \"system\", \"sharable\": False, \"read_only\": False, \"other_config\": {}, \"xenstore_data\":", "int(sr['physical_utilisation']) / (1024 * 1024 * 1024), int(sr['virtual_allocation']) / (1024 * 1024 *", "Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "# @author: <NAME> (morellon), Locaweb. # @author: <NAME> (PotHix), Locaweb. 
from simplestack.utils import", "def snapshot_revert(self, guest_id, snapshot_id): self.connection.xenapi.VM.revert(self._vm_ref(snapshot_id)) def snapshot_delete(self, guest_id, snapshot_id): self._delete_vm(snapshot_id) def tag_list(self, guest_id):", "on Guest\" % (network_interface_id) raise EntityNotFound(\"NetworkInterface\", entity_info) def _vm_ref(self, uuid): try: return self.connection.xenapi.VM.get_by_uuid(uuid)", "storage_ref = sr_ref if vm_size and vm_size > 0 and vm_size > max_free_space:", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "import errno import socket import httplib import logging LOG = logging.getLogger('simplestack.hypervisors.xen') class Stack(SimpleStack):", "in data: self.connection.xenapi.VDI.set_name_label( disk_rec[\"ref\"], data[\"name\"] ) self.connection.xenapi.VDI.set_name_description( disk_rec[\"ref\"], data[\"name\"] ) if \"size\" in", "def network_info(self, net_ref): return {\"name_label\": self.connection.xenapi.network.get_name_label(net_ref), \"bridge\": self.connection.xenapi.network.get_bridge(net_ref), \"name_description\": self.connection.xenapi.network.get_name_description(net_ref), \"other_config\": self.connection.xenapi.network.get_other_config(net_ref)} def", "[] for h in self.connection.xenapi.host.get_all_records().values(): hosts.append({'id': h[\"uuid\"]}) return hosts def host_info(self, host_id): host_ref", "guestdata: if guestdata[\"paravirtualized\"]: if guestdata[\"paravirtualized\"] is True: pv_args = \"-- quiet console=hvc0\" else:", "{\"name\": name} def network_list(self): net_refs = self.connection.xenapi.network.get_all() ret = [] for net in", "(8 * 1024) self.connection.xenapi.VIF.set_qos_algorithm_type( vif_ref, \"ratelimit\" ) self.connection.xenapi.VIF.set_qos_algorithm_params( vif_ref, {\"kbps\": str(rate)} ) else:", "tools_up_to_date = guest_metrics[\"PV_drivers_up_to_date\"] if \"0/ip\" in guest_metrics[\"networks\"].keys(): ip = 
guest_metrics[\"networks\"][\"0/ip\"] host = None", "1024) ) ) def _vm_info(self, vm_ref): vm = self.connection.xenapi.VM.get_record(vm_ref) tools_up_to_date = None ip", "\"name_description\": self.connection.xenapi.network.get_name_description(net_ref), \"other_config\": self.connection.xenapi.network.get_other_config(net_ref)} def _network_ref(self, name): net_ref = self.connection.xenapi.network.get_by_name_label(name) if len(net_ref) ==", "* 1024), \"type\": \"system\", \"sharable\": False, \"read_only\": False, \"other_config\": {}, \"xenstore_data\": {}, \"sm_config\":", "other_config}) def network_vlan_create(self, name, description, from_network, vlan, other_config={}): net_ref = self._network_create(name, description, other_config)", "\"cpus\" in guestdata: vcpus = guestdata[\"cpus\"] if not isinstance(vcpus,dict): vcpus = { \"vcpus_at_startup\"", "disk_rec.get(\"uuid\") ) ) def _snapshot_info(self, snapshot_ref): snapshot = self.connection.xenapi.VM.get_record(snapshot_ref) return( self.format_for.snapshot( snapshot.get('uuid'), snapshot.get('name_label')", "vif_record[\"network\"] != net_refs: new_attributes[\"network\"] = net_refs if \"locking_mode\" in data and vif_record[\"locking_mode\"] !=", "network_interface_info(self, guest_id, network_interface_id): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) return self._network_interface_info(vif_ref) def", "guest_id): return self.connection.xenapi.VM.resume( self._vm_ref(guest_id), False, False ) def guest_clone(self, guest_id, data): vm =", "1024 * 1024), int(sr['virtual_allocation']) / (1024 * 1024 * 1024), int(sr['physical_size']) / (1024", "memory = guestdata[\"memory\"] if not isinstance(memory,dict): memory = { \"memory_target\" : memory ,", "/ (1024 * 1024 * 1024) ) ) def _vm_info(self, vm_ref): vm =", "self._disk_info(disk_rec) def disk_info(self, guest_id, disk_id): vm_ref = self._vm_ref(guest_id) disk_rec = self._disk_rec(vm_ref, disk_id) 
return", "network_info(self, net_ref): return {\"name_label\": self.connection.xenapi.network.get_name_label(net_ref), \"bridge\": self.connection.xenapi.network.get_bridge(net_ref), \"name_description\": self.connection.xenapi.network.get_name_description(net_ref), \"other_config\": self.connection.xenapi.network.get_other_config(net_ref)} def _network_ref(self,", "vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) return self._network_interface_info(vif_ref) def network_interface_update(self, guest_id, network_interface_id,", "raise EntityNotFound(\"Disk\", entity_info) def _network_interface_ref(self, vm_ref, network_interface_id): vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref) for vif_ref in", "self.connection.xenapi.VIF.set_qos_algorithm_type( vif_ref, \"ratelimit\" ) self.connection.xenapi.VIF.set_qos_algorithm_params( vif_ref, {\"kbps\": str(rate)} ) else: self.connection.xenapi.VIF.set_qos_algorithm_type(vif_ref, \"\") return", "not vm_ref: return for snap_ref in self.connection.xenapi.VM.get_snapshots(vm_ref): snap = self.connection.xenapi.VM.get_record(snap_ref) self._delete_vm(snap[\"uuid\"]) self._delete_disks(vm_ref) self.connection.xenapi.VM.destroy(vm_ref)", "{ \"Running\": \"STARTED\", \"Halted\": \"STOPPED\", \"Suspended\": \"PAUSED\" } def __init__(self, poolinfo): self.connection =", "disk_rec[\"ref\"], data[\"name\"] ) self.connection.xenapi.VDI.set_name_description( disk_rec[\"ref\"], data[\"name\"] ) if \"size\" in data: new_disk_size =", "vm_size} ) response = conn.getresponse() response.status response.read() except socket.error as err: if err.errno", "self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) vif_record = self.connection.xenapi.VIF.get_record(vif_ref) new_attributes = {} if \"network\"", "except: LOG.warning(\"uuid=%s action=not_found\" % uuid) return None def _host_info(self, host_ref): host = 
self.connection.xenapi.host.get_record(host_ref)", "guest_id, tag_name): vm_ref = self._vm_ref(guest_id) self.connection.xenapi.VM.add_tags(vm_ref, tag_name) return self.tag_list(guest_id) def tag_delete(self, guest_id, tag_name):", "def _snapshot_info(self, snapshot_ref): snapshot = self.connection.xenapi.VM.get_record(snapshot_ref) return( self.format_for.snapshot( snapshot.get('uuid'), snapshot.get('name_label') ) ) def", "\"%s - on Guest\" % (disk_id) raise EntityNotFound(\"Disk\", entity_info) def _network_interface_ref(self, vm_ref, network_interface_id):", "%s\" % name) return net_ref[0] def _network_get_pifs(self, name): ref = self._network_ref(name) return self.connection.xenapi.network.get_PIFs(ref)", "self._network_interface_ref(vm_ref, network_interface_id) try: self.connection.xenapi.VIF.unplug(vif_ref) except: pass self.connection.xenapi.VIF.destroy(vif_ref) def snapshot_list(self, guest_id): snaps = [", "for net in net_refs: ret.append({\"id\": net}) return ret def network_info(self, net_ref): return {\"name_label\":", "hdd - disks_size + int(disk[\"virtual_size\"]) self.connection.xenapi.VDI.resize(disk[\"ref\"], str(new_disk_size)) return self._vm_info(self._vm_ref(guest_id)) def guest_delete(self, guest_id): self._delete_vm(guest_id)", "snapshot_name: snapshot_name = str(datetime.datetime.now()) snap = self.connection.xenapi.VM.snapshot( self._vm_ref(guest_id), snapshot_name ) return self._snapshot_info(snap) def", "guest_id, force=False): vm_ref = self._vm_ref(guest_id) if force: return self.connection.xenapi.VM.hard_reboot(vm_ref) else: return self.connection.xenapi.VM.clean_reboot(vm_ref) def", "data): vm_ref = self._vm_ref(guest_id) vif_ref = self._network_interface_ref(vm_ref, network_interface_id) vif_record = self.connection.xenapi.VIF.get_record(vif_ref) new_attributes =", "guest_list(self): guests = [] for vm in self.connection.xenapi.VM.get_all_records().values(): if (not vm.get('is_a_snapshot')) and (not", "= 
self.connection.xenapi.network.get_by_name_label(name) if len(net_ref) == 0: raise EntityNotFound(\"NetworkInterface\", \"Unknown network: %s\" % name)", "str(error): self.poolinfo[\"api_server\"] = str(error).split(\"'\")[3] self.connect() else: raise error def logout(self): self.connection.xenapi.session.logout() def pool_info(self):", "== \"Disk\": vdi = self.connection.xenapi.VDI.get_record(vbd['VDI']) vdi['userdevice'] = vbd['userdevice'] vdi['ref'] = vbd['VDI'] disks.append(vdi) return", "except in compliance with the License. # You may obtain a copy of", "1024), self.get_disks_size(vm_ref) / (1024 * 1024 * 1024), vm[\"PV_args\"], tools_up_to_date, ip, self.state_translation[vm.get('power_state')], host", "<NAME> (morellon), Locaweb. # @author: <NAME> (PotHix), Locaweb. from simplestack.utils import XenAPI from", "memory_target = str(int(memory[\"memory_target\"])<<20) memory_static_min = str(int(memory[\"memory_static_min\"])<<20) memory_static_max = str(int(memory[\"memory_static_max\"])<<20) self.connection.xenapi.VM.set_memory_limits( vm_ref, memory_static_min, memory_static_max,", "self.connection.xenapi.VM.set_memory_limits( vm_ref, memory_static_min, memory_static_max, memory_target, memory_target ) if \"memory_target_live\" in guestdata: memory_target =", "vdi_rec[\"SR\"] = disks[0][\"SR\"] if \"name\" in data: vdi_rec[\"name_label\"] = data[\"name\"] vdi_rec[\"name_description\"] = data[\"name\"]", "snapshot_ref): snapshot = self.connection.xenapi.VM.get_record(snapshot_ref) return( self.format_for.snapshot( snapshot.get('uuid'), snapshot.get('name_label') ) ) def _network_interface_info(self, vif_ref):", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "self.connection.xenapi.VM.remove_tags(vm_ref, tag_name) def get_disks(self, vm_ref): disks = [] vm = self.connection.xenapi.VM.get_record(vm_ref) for vbd_ref", "= self.get_disks(vm_ref) return [self._disk_info(d) for d in disks] def disk_create(self, guest_id, data): vm_ref", "storages def storage_info(self, storage_id): sr_ref = self.connection.xenapi.SR.get_by_uuid(storage_id) return self._storage_info(sr_ref) def guest_list(self): guests =", "self._snapshot_info(snap) def snapshot_revert(self, guest_id, snapshot_id): self.connection.xenapi.VM.revert(self._vm_ref(snapshot_id)) def snapshot_delete(self, guest_id, snapshot_id): self._delete_vm(snapshot_id) def tag_list(self,", "= self.connection.xenapi.host_metrics.get_record(met_ref) total_memory += int(m_rec['memory_total']) pool_rec = self.connection.xenapi.pool.get_all_records().values()[0] master_rec = self.connection.xenapi.host.get_record(pool_rec[\"master\"]) return (", "guest_resume(self, guest_id): return self.connection.xenapi.VM.resume( self._vm_ref(guest_id), False, False ) def guest_clone(self, guest_id, data): vm", "= guestdata[\"cpus\"] if not isinstance(vcpus,dict): vcpus = { \"vcpus_at_startup\" : vcpus, \"vcpus_max\" :", "def _network_get_pifs(self, name): ref = self._network_ref(name) return self.connection.xenapi.network.get_PIFs(ref) def _network_create(self, name, description, other_config={}):", "# Copyright 2013 Locaweb. # All Rights Reserved. 
# # Licensed under the", ") if free_space > max_free_space: max_free_space = free_space storage_ref = sr_ref if vm_size", "self._cd_ref(vm_ref) if media_data.get(\"name\") and media_data[\"name\"] != \"\": self.media_unmount(guest_id) iso_ref = self.connection.xenapi.VDI.get_by_name_label( media_data[\"name\"] )[0]", "\"PAUSED\" } def __init__(self, poolinfo): self.connection = False self.poolinfo = poolinfo self.format_for =", "Exception, error: # If host is slave, connect to master if 'HOST_IS_SLAVE' in", "= {} if \"network\" in data: net_refs = self._network_ref(data[\"network\"]) if vif_record[\"network\"] != net_refs:", "[] }) if data.get(\"storage_id\"): raise FeatureNotImplemented() else: disks = self.get_disks(vm_ref) vdi_rec[\"SR\"] = disks[0][\"SR\"]", ") if \"memory_target_live\" in guestdata: memory_target = str(int(guestdata[\"memory_target_live\"])<<20) self.connection.xenapi.VM.set_memory_dynamic_range( vm_ref, memory_target, memory_target )", "guestdata: self.connection.xenapi.VM.set_VCPUs_number_live(vm_ref, str(guestdata[\"vcpus_number_live\"])) if \"vcpu_settings\" in guestdata: parameters = self.connection.xenapi.VM.get_VCPUs_params(vm_ref) parameters.update(guestdata[\"vcpu_settings\"]) self.connection.xenapi.VM.set_VCPUs_params(vm_ref, parameters)", "_storage_info(self, sr_ref): sr = self.connection.xenapi.SR.get_record(sr_ref) return( self.format_for.storage( sr['uuid'], sr['name_label'], sr['type'], int(sr['physical_utilisation']) / (1024", "self.connection.xenapi.session.logout() def pool_info(self): used_memory = 0 for vm_rec in self.connection.xenapi.VM.get_all_records().values(): if not vm_rec['is_a_template']", "raise EntityNotFound(\"NetworkInterface\", entity_info) def _vm_ref(self, uuid): try: return self.connection.xenapi.VM.get_by_uuid(uuid) except: LOG.warning(\"uuid=%s action=not_found\" %", "\"hdd\" in guestdata: disk = self.get_disks(vm_ref)[-1] disks_size = self.get_disks_size(vm_ref) hdd = guestdata.get(\"hdd\") *", "= 
data[\"locking_mode\"] if \"ipv4_allowed\" in data and vif_record[\"ipv4_allowed\"] != data[\"ipv4_allowed\"]: new_attributes[\"ipv4_allowed\"] = data[\"ipv4_allowed\"]", "vm.get('name_label'), int(vm.get('VCPUs_at_startup')), int(vm.get('memory_static_max')) / (1024 * 1024), self.get_disks_size(vm_ref) / (1024 * 1024 *", "\"empty\": False, \"other_config\": {}, \"qos_algorithm_type\": \"\", \"qos_algorithm_params\": {} } vdi_rec = ({ \"name_label\":", "self.connect() def connect(self): self.connection = XenAPI.Session( \"https://%s/\" % self.poolinfo.get(\"api_server\") ) try: self.connection.xenapi.login_with_password( self.poolinfo.get(\"username\"),", "host['name_label'], host['address'] ) ) def _storage_info(self, sr_ref): sr = self.connection.xenapi.SR.get_record(sr_ref) return( self.format_for.storage( sr['uuid'],", "/ (1024 * 1024 * 1024), vm[\"PV_args\"], tools_up_to_date, ip, self.state_translation[vm.get('power_state')], host ) )", ") else: return self.connection.xenapi.VM.clean_shutdown( self._vm_ref(guest_id) ) def guest_start(self, guest_id): return self.connection.xenapi.VM.start( self._vm_ref(guest_id), False,", "free_space = ( int(record[\"physical_size\"]) - int(record[\"virtual_allocation\"]) ) if free_space > max_free_space: max_free_space =", "vif_ref = self.connection.xenapi.VIF.create(vif_record) try: self.connection.xenapi.VIF.plug(vif_ref) except: pass return self._network_interface_info(vif_ref) def network_interface_info(self, guest_id, network_interface_id):", "= [] for h in self.connection.xenapi.host.get_all_records().values(): hosts.append({'id': h[\"uuid\"]}) return hosts def host_info(self, host_id):", "parameters = self.connection.xenapi.VM.get_VCPUs_params(vm_ref) parameters.update(guestdata[\"vcpu_settings\"]) self.connection.xenapi.VM.set_VCPUs_params(vm_ref, parameters) if \"ha_enabled\" in guestdata: if guestdata[\"ha_enabled\"]: self.connection.xenapi.VM.set_ha_restart_priority(", "snaps = [ self._snapshot_info(s) for s in 
self.connection.xenapi.VM.get_snapshots( self._vm_ref(guest_id) ) ] return snaps", "Disk\", \"name_description\": \"Simplestack generated disk\", \"virtual_size\": str(data[\"size\"] * 1024 * 1024 * 1024),", "# # @author: <NAME>, Locaweb. # @author: <NAME> (morellon), Locaweb. # @author: <NAME>", "= 0 for vm_rec in self.connection.xenapi.VM.get_all_records().values(): if not vm_rec['is_a_template'] and not vm_rec['is_a_snapshot']: used_memory", "vdi_ref self.connection.xenapi.VBD.create(vbd_rec) disk_rec = self._disk_rec(vm_ref, next_device) return self._disk_info(disk_rec) def disk_info(self, guest_id, disk_id): vm_ref" ]
[ "'bold italic #438958', MToken.STRING: 'bold #666666', MToken.SYMBOL: 'bold #002CC3', MToken.UNKNOWN: 'bold #000000', }", "'#5d9066', MToken.MESSAGE: '#ab466a', MToken.NUMBER: '#b66a4b', MToken.OPERATOR: '#555555', MToken.PATTERN: 'italic #6E8413', MToken.SLOT: 'italic #6E8413',", "MToken.SYMBOL: '#4b78b1', MToken.UNKNOWN: '#555555', } class MathematicaNotebookStyle(Style): default_style = '' background_color = '#ffffff'", "'italic #6E8413', MToken.SLOT: 'italic #6E8413', MToken.STRING: '#499A9F', MToken.SYMBOL: '#4b78b1', MToken.UNKNOWN: '#555555', } class", "#6E8413', MToken.SLOT: 'italic #6E8413', MToken.STRING: '#499A9F', MToken.SYMBOL: '#4b78b1', MToken.UNKNOWN: '#555555', } class MathematicaNotebookStyle(Style):", "#6E8413', MToken.STRING: '#499A9F', MToken.SYMBOL: '#4b78b1', MToken.UNKNOWN: '#555555', } class MathematicaNotebookStyle(Style): default_style = ''", "'#4b78b1', MToken.UNKNOWN: '#555555', } class MathematicaNotebookStyle(Style): default_style = '' background_color = '#ffffff' styles", "'#ffffff' styles = { MToken.BUILTIN: 'bold #000000', MToken.COMMENT: 'bold #999999', MToken.GROUP: 'bold #000000',", "import MToken class MathematicaStyle(Style): default_style = '' background_color = '#fefefe' styles = {", "#3C7D91', MToken.MESSAGE: 'bold #666666', MToken.NUMBER: 'bold #000000', MToken.OPERATOR: 'bold #000000', MToken.PATTERN: 'bold italic", "'#353f42', MToken.COMMENT: 'italic #aaaaaa', MToken.GROUP: '#555555', MToken.LOCAL_SCOPE: '#5d9066', MToken.MESSAGE: '#ab466a', MToken.NUMBER: '#b66a4b', MToken.OPERATOR:", "under the MIT License (https://opensource.org/licenses/MIT) from pygments.style import Style from mathematica.lexer import MToken", "'#ab466a', MToken.NUMBER: '#b66a4b', MToken.OPERATOR: '#555555', MToken.PATTERN: 'italic #6E8413', MToken.SLOT: 'italic #6E8413', MToken.STRING: '#499A9F',", "#000000', MToken.LOCAL_SCOPE: 'bold #3C7D91', MToken.MESSAGE: 'bold #666666', MToken.NUMBER: 'bold #000000', MToken.OPERATOR: 'bold #000000',", 
"(https://opensource.org/licenses/MIT) from pygments.style import Style from mathematica.lexer import MToken class MathematicaStyle(Style): default_style =", "'bold #3C7D91', MToken.MESSAGE: 'bold #666666', MToken.NUMBER: 'bold #000000', MToken.OPERATOR: 'bold #000000', MToken.PATTERN: 'bold", "#aaaaaa', MToken.GROUP: '#555555', MToken.LOCAL_SCOPE: '#5d9066', MToken.MESSAGE: '#ab466a', MToken.NUMBER: '#b66a4b', MToken.OPERATOR: '#555555', MToken.PATTERN: 'italic", "#438958', MToken.SLOT: 'bold italic #438958', MToken.STRING: 'bold #666666', MToken.SYMBOL: 'bold #002CC3', MToken.UNKNOWN: 'bold", "pygments.style import Style from mathematica.lexer import MToken class MathematicaStyle(Style): default_style = '' background_color", "= { MToken.BUILTIN: '#353f42', MToken.COMMENT: 'italic #aaaaaa', MToken.GROUP: '#555555', MToken.LOCAL_SCOPE: '#5d9066', MToken.MESSAGE: '#ab466a',", "'bold #000000', MToken.OPERATOR: 'bold #000000', MToken.PATTERN: 'bold italic #438958', MToken.SLOT: 'bold italic #438958',", "MIT License (https://opensource.org/licenses/MIT) from pygments.style import Style from mathematica.lexer import MToken class MathematicaStyle(Style):", "MToken.MESSAGE: '#ab466a', MToken.NUMBER: '#b66a4b', MToken.OPERATOR: '#555555', MToken.PATTERN: 'italic #6E8413', MToken.SLOT: 'italic #6E8413', MToken.STRING:", "MToken.NUMBER: 'bold #000000', MToken.OPERATOR: 'bold #000000', MToken.PATTERN: 'bold italic #438958', MToken.SLOT: 'bold italic", "{ MToken.BUILTIN: 'bold #000000', MToken.COMMENT: 'bold #999999', MToken.GROUP: 'bold #000000', MToken.LOCAL_SCOPE: 'bold #3C7D91',", "from mathematica.lexer import MToken class MathematicaStyle(Style): default_style = '' background_color = '#fefefe' styles", "'#555555', MToken.LOCAL_SCOPE: '#5d9066', MToken.MESSAGE: '#ab466a', MToken.NUMBER: '#b66a4b', MToken.OPERATOR: '#555555', MToken.PATTERN: 'italic #6E8413', MToken.SLOT:", "'bold #000000', MToken.LOCAL_SCOPE: 'bold #3C7D91', MToken.MESSAGE: 'bold #666666', MToken.NUMBER: 
'bold #000000', MToken.OPERATOR: 'bold", "'bold italic #438958', MToken.SLOT: 'bold italic #438958', MToken.STRING: 'bold #666666', MToken.SYMBOL: 'bold #002CC3',", "= '' background_color = '#ffffff' styles = { MToken.BUILTIN: 'bold #000000', MToken.COMMENT: 'bold", "default_style = '' background_color = '#ffffff' styles = { MToken.BUILTIN: 'bold #000000', MToken.COMMENT:", "'bold #000000', MToken.PATTERN: 'bold italic #438958', MToken.SLOT: 'bold italic #438958', MToken.STRING: 'bold #666666',", "MToken.UNKNOWN: '#555555', } class MathematicaNotebookStyle(Style): default_style = '' background_color = '#ffffff' styles =", "MToken.PATTERN: 'bold italic #438958', MToken.SLOT: 'bold italic #438958', MToken.STRING: 'bold #666666', MToken.SYMBOL: 'bold", "# Copyright (c) 2016 rsmenon # Licensed under the MIT License (https://opensource.org/licenses/MIT) from", "styles = { MToken.BUILTIN: '#353f42', MToken.COMMENT: 'italic #aaaaaa', MToken.GROUP: '#555555', MToken.LOCAL_SCOPE: '#5d9066', MToken.MESSAGE:", "Licensed under the MIT License (https://opensource.org/licenses/MIT) from pygments.style import Style from mathematica.lexer import", "class MathematicaNotebookStyle(Style): default_style = '' background_color = '#ffffff' styles = { MToken.BUILTIN: 'bold", "import Style from mathematica.lexer import MToken class MathematicaStyle(Style): default_style = '' background_color =", "background_color = '#ffffff' styles = { MToken.BUILTIN: 'bold #000000', MToken.COMMENT: 'bold #999999', MToken.GROUP:", "} class MathematicaNotebookStyle(Style): default_style = '' background_color = '#ffffff' styles = { MToken.BUILTIN:", "'bold #666666', MToken.NUMBER: 'bold #000000', MToken.OPERATOR: 'bold #000000', MToken.PATTERN: 'bold italic #438958', MToken.SLOT:", "'#fefefe' styles = { MToken.BUILTIN: '#353f42', MToken.COMMENT: 'italic #aaaaaa', MToken.GROUP: '#555555', MToken.LOCAL_SCOPE: '#5d9066',", "#000000', MToken.COMMENT: 'bold #999999', MToken.GROUP: 'bold #000000', 
MToken.LOCAL_SCOPE: 'bold #3C7D91', MToken.MESSAGE: 'bold #666666',", "#000000', MToken.PATTERN: 'bold italic #438958', MToken.SLOT: 'bold italic #438958', MToken.STRING: 'bold #666666', MToken.SYMBOL:", "MToken.GROUP: 'bold #000000', MToken.LOCAL_SCOPE: 'bold #3C7D91', MToken.MESSAGE: 'bold #666666', MToken.NUMBER: 'bold #000000', MToken.OPERATOR:", "= '#ffffff' styles = { MToken.BUILTIN: 'bold #000000', MToken.COMMENT: 'bold #999999', MToken.GROUP: 'bold", "MToken.LOCAL_SCOPE: '#5d9066', MToken.MESSAGE: '#ab466a', MToken.NUMBER: '#b66a4b', MToken.OPERATOR: '#555555', MToken.PATTERN: 'italic #6E8413', MToken.SLOT: 'italic", "#000000', MToken.OPERATOR: 'bold #000000', MToken.PATTERN: 'bold italic #438958', MToken.SLOT: 'bold italic #438958', MToken.STRING:", "utf-8 -*- # Copyright (c) 2016 rsmenon # Licensed under the MIT License", "License (https://opensource.org/licenses/MIT) from pygments.style import Style from mathematica.lexer import MToken class MathematicaStyle(Style): default_style", "MToken.BUILTIN: '#353f42', MToken.COMMENT: 'italic #aaaaaa', MToken.GROUP: '#555555', MToken.LOCAL_SCOPE: '#5d9066', MToken.MESSAGE: '#ab466a', MToken.NUMBER: '#b66a4b',", "2016 rsmenon # Licensed under the MIT License (https://opensource.org/licenses/MIT) from pygments.style import Style", "#999999', MToken.GROUP: 'bold #000000', MToken.LOCAL_SCOPE: 'bold #3C7D91', MToken.MESSAGE: 'bold #666666', MToken.NUMBER: 'bold #000000',", "= { MToken.BUILTIN: 'bold #000000', MToken.COMMENT: 'bold #999999', MToken.GROUP: 'bold #000000', MToken.LOCAL_SCOPE: 'bold", "#666666', MToken.NUMBER: 'bold #000000', MToken.OPERATOR: 'bold #000000', MToken.PATTERN: 'bold italic #438958', MToken.SLOT: 'bold", "background_color = '#fefefe' styles = { MToken.BUILTIN: '#353f42', MToken.COMMENT: 'italic #aaaaaa', MToken.GROUP: '#555555',", "<gh_stars>10-100 # -*- coding: utf-8 -*- # Copyright (c) 2016 rsmenon # Licensed", "the MIT License (https://opensource.org/licenses/MIT) from pygments.style import 
Style from mathematica.lexer import MToken class", "MToken.BUILTIN: 'bold #000000', MToken.COMMENT: 'bold #999999', MToken.GROUP: 'bold #000000', MToken.LOCAL_SCOPE: 'bold #3C7D91', MToken.MESSAGE:", "italic #438958', MToken.SLOT: 'bold italic #438958', MToken.STRING: 'bold #666666', MToken.SYMBOL: 'bold #002CC3', MToken.UNKNOWN:", "'bold #000000', MToken.COMMENT: 'bold #999999', MToken.GROUP: 'bold #000000', MToken.LOCAL_SCOPE: 'bold #3C7D91', MToken.MESSAGE: 'bold", "MToken.LOCAL_SCOPE: 'bold #3C7D91', MToken.MESSAGE: 'bold #666666', MToken.NUMBER: 'bold #000000', MToken.OPERATOR: 'bold #000000', MToken.PATTERN:", "MToken.OPERATOR: 'bold #000000', MToken.PATTERN: 'bold italic #438958', MToken.SLOT: 'bold italic #438958', MToken.STRING: 'bold", "mathematica.lexer import MToken class MathematicaStyle(Style): default_style = '' background_color = '#fefefe' styles =", "coding: utf-8 -*- # Copyright (c) 2016 rsmenon # Licensed under the MIT", "class MathematicaStyle(Style): default_style = '' background_color = '#fefefe' styles = { MToken.BUILTIN: '#353f42',", "= '' background_color = '#fefefe' styles = { MToken.BUILTIN: '#353f42', MToken.COMMENT: 'italic #aaaaaa',", "MToken.OPERATOR: '#555555', MToken.PATTERN: 'italic #6E8413', MToken.SLOT: 'italic #6E8413', MToken.STRING: '#499A9F', MToken.SYMBOL: '#4b78b1', MToken.UNKNOWN:", "Copyright (c) 2016 rsmenon # Licensed under the MIT License (https://opensource.org/licenses/MIT) from pygments.style", "MToken.COMMENT: 'bold #999999', MToken.GROUP: 'bold #000000', MToken.LOCAL_SCOPE: 'bold #3C7D91', MToken.MESSAGE: 'bold #666666', MToken.NUMBER:", "MToken.STRING: '#499A9F', MToken.SYMBOL: '#4b78b1', MToken.UNKNOWN: '#555555', } class MathematicaNotebookStyle(Style): default_style = '' background_color", "-*- coding: utf-8 -*- # Copyright (c) 2016 rsmenon # Licensed under the", "MToken class MathematicaStyle(Style): default_style = '' background_color = '#fefefe' styles = { MToken.BUILTIN:", "'' background_color = '#fefefe' 
styles = { MToken.BUILTIN: '#353f42', MToken.COMMENT: 'italic #aaaaaa', MToken.GROUP:", "= '#fefefe' styles = { MToken.BUILTIN: '#353f42', MToken.COMMENT: 'italic #aaaaaa', MToken.GROUP: '#555555', MToken.LOCAL_SCOPE:", "MathematicaNotebookStyle(Style): default_style = '' background_color = '#ffffff' styles = { MToken.BUILTIN: 'bold #000000',", "MToken.COMMENT: 'italic #aaaaaa', MToken.GROUP: '#555555', MToken.LOCAL_SCOPE: '#5d9066', MToken.MESSAGE: '#ab466a', MToken.NUMBER: '#b66a4b', MToken.OPERATOR: '#555555',", "MathematicaStyle(Style): default_style = '' background_color = '#fefefe' styles = { MToken.BUILTIN: '#353f42', MToken.COMMENT:", "MToken.SLOT: 'italic #6E8413', MToken.STRING: '#499A9F', MToken.SYMBOL: '#4b78b1', MToken.UNKNOWN: '#555555', } class MathematicaNotebookStyle(Style): default_style", "'bold #999999', MToken.GROUP: 'bold #000000', MToken.LOCAL_SCOPE: 'bold #3C7D91', MToken.MESSAGE: 'bold #666666', MToken.NUMBER: 'bold", "default_style = '' background_color = '#fefefe' styles = { MToken.BUILTIN: '#353f42', MToken.COMMENT: 'italic", "'' background_color = '#ffffff' styles = { MToken.BUILTIN: 'bold #000000', MToken.COMMENT: 'bold #999999',", "# Licensed under the MIT License (https://opensource.org/licenses/MIT) from pygments.style import Style from mathematica.lexer", "from pygments.style import Style from mathematica.lexer import MToken class MathematicaStyle(Style): default_style = ''", "rsmenon # Licensed under the MIT License (https://opensource.org/licenses/MIT) from pygments.style import Style from", "# -*- coding: utf-8 -*- # Copyright (c) 2016 rsmenon # Licensed under", "'italic #aaaaaa', MToken.GROUP: '#555555', MToken.LOCAL_SCOPE: '#5d9066', MToken.MESSAGE: '#ab466a', MToken.NUMBER: '#b66a4b', MToken.OPERATOR: '#555555', MToken.PATTERN:", "{ MToken.BUILTIN: '#353f42', MToken.COMMENT: 'italic #aaaaaa', MToken.GROUP: '#555555', MToken.LOCAL_SCOPE: '#5d9066', MToken.MESSAGE: '#ab466a', MToken.NUMBER:", "MToken.GROUP: '#555555', 
MToken.LOCAL_SCOPE: '#5d9066', MToken.MESSAGE: '#ab466a', MToken.NUMBER: '#b66a4b', MToken.OPERATOR: '#555555', MToken.PATTERN: 'italic #6E8413',", "'italic #6E8413', MToken.STRING: '#499A9F', MToken.SYMBOL: '#4b78b1', MToken.UNKNOWN: '#555555', } class MathematicaNotebookStyle(Style): default_style =", "Style from mathematica.lexer import MToken class MathematicaStyle(Style): default_style = '' background_color = '#fefefe'", "styles = { MToken.BUILTIN: 'bold #000000', MToken.COMMENT: 'bold #999999', MToken.GROUP: 'bold #000000', MToken.LOCAL_SCOPE:", "'#555555', MToken.PATTERN: 'italic #6E8413', MToken.SLOT: 'italic #6E8413', MToken.STRING: '#499A9F', MToken.SYMBOL: '#4b78b1', MToken.UNKNOWN: '#555555',", "'#499A9F', MToken.SYMBOL: '#4b78b1', MToken.UNKNOWN: '#555555', } class MathematicaNotebookStyle(Style): default_style = '' background_color =", "'#555555', } class MathematicaNotebookStyle(Style): default_style = '' background_color = '#ffffff' styles = {", "MToken.SLOT: 'bold italic #438958', MToken.STRING: 'bold #666666', MToken.SYMBOL: 'bold #002CC3', MToken.UNKNOWN: 'bold #000000',", "MToken.NUMBER: '#b66a4b', MToken.OPERATOR: '#555555', MToken.PATTERN: 'italic #6E8413', MToken.SLOT: 'italic #6E8413', MToken.STRING: '#499A9F', MToken.SYMBOL:", "'#b66a4b', MToken.OPERATOR: '#555555', MToken.PATTERN: 'italic #6E8413', MToken.SLOT: 'italic #6E8413', MToken.STRING: '#499A9F', MToken.SYMBOL: '#4b78b1',", "MToken.MESSAGE: 'bold #666666', MToken.NUMBER: 'bold #000000', MToken.OPERATOR: 'bold #000000', MToken.PATTERN: 'bold italic #438958',", "-*- # Copyright (c) 2016 rsmenon # Licensed under the MIT License (https://opensource.org/licenses/MIT)", "MToken.PATTERN: 'italic #6E8413', MToken.SLOT: 'italic #6E8413', MToken.STRING: '#499A9F', MToken.SYMBOL: '#4b78b1', MToken.UNKNOWN: '#555555', }", "(c) 2016 rsmenon # Licensed under the MIT License (https://opensource.org/licenses/MIT) from pygments.style import" ]